Diffstat (limited to 'clang/test/CodeGen/RISCV')
-rw-r--r--  clang/test/CodeGen/RISCV/riscv-inline-asm-rvv.c  36
-rw-r--r--  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c  6
-rw-r--r--  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb.c  15
-rw-r--r--  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbc.c  48
-rw-r--r--  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbr.c  75
-rw-r--r--  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbb.c  27
-rw-r--r--  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbc.c  48
-rw-r--r--  clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbr.c  99
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c  2587
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c  1328
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c (renamed from clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c)  184
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c  2587
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c (renamed from clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c)  44
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c  1579
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c  190
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c  560
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c  560
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c  146
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c  560
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c  560
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c  1535
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c  561
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c  561
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c  561
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c  561
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c  291
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c  291
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c  579
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c  1633
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c  296
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c  291
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c  491
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c  1118
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c  255
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c  255
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c  251
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c  255
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c  255
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c  331
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c  491
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c  334
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c  338
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c  859
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c  6125
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c  965
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c  6123
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c  2566
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c  204
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c  1466
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c  548
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c  548
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c  548
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c  548
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c  106
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c  106
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c  204
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c  2566
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c  197
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c  2584
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c  1287
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c  197
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c  2599
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c  2599
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c  2584
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c  197
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c  6168
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c  106
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c  106
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c  1793
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c  848
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c  848
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c  204
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c  4785
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c  1240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c  2587
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c  1328
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c  1819
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c  820
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c  1348
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c  1323
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c  1646
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c  1631
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c  1287
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c  6523
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c  1240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c  1240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c  1813
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c  1290
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c  1305
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c  2587
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c  6523
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c  3524
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c  3186
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c  2528
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c  1124
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c  3524
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c  820
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c  2587
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c  1328
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c  6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c  2587
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c  6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c  1579
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c  196
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c  560
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c  560
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c  146
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c  560
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c  560
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c  1535
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c  561
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c  561
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c  561
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c  561
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c  291
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c  291
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c  579
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c  1633
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c  296
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c  291
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c  551
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c  491
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c  1118
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c  255
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c  255
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c  251
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c  255
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c  255
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c  171
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c  171
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c  491
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c  598
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c  646
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c  117
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c  2348
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c  6123
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c  1862
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c  6123
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c  2566
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c  204
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c  2473
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c  92
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c  1466
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c  548
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c  272
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c  548
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c  548
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c  548
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c  106
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c  106
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c  204
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c  2566
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c  211
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c  2584
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c  92
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c  1287
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c  211
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c  2599
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c  2599
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c  2584
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c  211
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c  6168
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c  106
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c  106
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c  1793
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c  848
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c  848
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c  204
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c  1372
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c  4785
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c  1240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c  2587
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c  1328
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c  117
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c  6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c  6
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c  820
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c  1348
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c  1323
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c  1646
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c  1631
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c  1287
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c  6523
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c  1240
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c  1241
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c  1813
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c  1290
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c  1305
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c  2587
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c  6523
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c  3524
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c  3186
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c  2528
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c  1124
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c  3524
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c  2472
-rw-r--r--  clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c  820
256 files changed, 342752 insertions, 130 deletions
diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm-rvv.c b/clang/test/CodeGen/RISCV/riscv-inline-asm-rvv.c
new file mode 100644
index 000000000000..14558778278e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm-rvv.c
@@ -0,0 +1,36 @@
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v \
+// RUN: -O2 -emit-llvm %s -o - \
+// RUN: | FileCheck %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
+// RUN: -O2 -emit-llvm %s -o - \
+// RUN: | FileCheck %s
+
+// Test RISC-V V-extension specific inline assembly constraints.
+#include <riscv_vector.h>
+
+void test_v_reg() {
+ asm volatile(
+ "vsetvli x1, x0, e32,m2,tu,mu\n"
+ "vadd.vv v1, v2, v3, v0.t"
+ :
+ :
+ : "v1", "x1");
+// CHECK-LABEL: define{{.*}} @test_v_reg
+// CHECK: "~{v1},~{x1}"
+}
+
+vint32m1_t test_vr(vint32m1_t a, vint32m1_t b) {
+// CHECK-LABEL: define{{.*}} @test_vr
+// CHECK: %0 = tail call <vscale x 2 x i32> asm sideeffect "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b)
+ vint32m1_t ret;
+ asm volatile ("vadd.vv %0, %1, %2" : "=vr"(ret) : "vr"(a), "vr"(b));
+ return ret;
+}
+
+vbool1_t test_vm(vbool1_t a, vbool1_t b) {
+// CHECK-LABEL: define{{.*}} @test_vm
+// CHECK: %0 = tail call <vscale x 64 x i1> asm sideeffect "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
+ vbool1_t ret;
+ asm volatile ("vmand.mm %0, %1, %2" : "=vm"(ret) : "vm"(a), "vm"(b));
+ return ret;
+}
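
The "vr" and "vm" constraints exercised above select a V-extension vector register and a vector mask register, respectively; as the CHECK lines show, both lower to LLVM's generic "v" constraint class in the emitted IR. For comparison, a minimal sketch of the same vector add written against the overloaded intrinsic API covered by the rvv-intrinsics-overloaded tests in this patch (add_intrinsic is a hypothetical helper name, not part of the patch):

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical helper, not part of the patch: the intrinsic-level
// equivalent of the "vadd.vv" inline asm in test_vr above, using the
// overloaded vadd() form these tests exercise.
vint32m1_t add_intrinsic(vint32m1_t a, vint32m1_t b, size_t vl) {
  return vadd(a, b, vl);
}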
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c
new file mode 100644
index 000000000000..ad864aa65fef
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb-error.c
@@ -0,0 +1,6 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-zbb -verify %s -o -
+
+int orc_b_64(int a) {
+ return __builtin_riscv_orc_b_64(a); // expected-error {{builtin requires 'RV64' extension support to be enabled}}
+}
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb.c
new file mode 100644
index 000000000000..593b52698d35
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbb.c
@@ -0,0 +1,15 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-zbb -emit-llvm %s -o - \
+// RUN: | FileCheck %s -check-prefix=RV32ZBB
+
+// RV32ZBB-LABEL: @orc_b_32(
+// RV32ZBB-NEXT: entry:
+// RV32ZBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBB-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBB-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.orc.b.i32(i32 [[TMP0]])
+// RV32ZBB-NEXT: ret i32 [[TMP1]]
+//
+int orc_b_32(int a) {
+ return __builtin_riscv_orc_b_32(a);
+}
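
For readers unfamiliar with Zbb's orc.b: it ORs the bits within each byte of the source, so every nonzero input byte becomes 0xFF and every zero byte stays 0x00. A portable reference model of the semantics the builtin exposes (an illustrative sketch, not how the test or the backend implements it):

#include <stdint.h>

// Reference model of orc.b on RV32: each output byte is 0xFF if the
// corresponding input byte is nonzero, else 0x00.
static uint32_t orc_b_32_ref(uint32_t x) {
  uint32_t r = 0;
  for (int i = 0; i < 32; i += 8)
    if ((x >> i) & 0xFFu)
      r |= 0xFFu << i;
  return r;
}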
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbc.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbc.c
new file mode 100644
index 000000000000..c07bd91fb40e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbc.c
@@ -0,0 +1,48 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-zbc -emit-llvm %s -o - \
+// RUN: | FileCheck %s -check-prefix=RV32ZBC
+
+// RV32ZBC-LABEL: @clmul(
+// RV32ZBC-NEXT: entry:
+// RV32ZBC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBC-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBC-NEXT: store i32 [[B:%.*]], i32* [[B_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
+// RV32ZBC-NEXT: ret i32 [[TMP2]]
+//
+long clmul(long a, long b) {
+ return __builtin_riscv_clmul(a, b);
+}
+
+// RV32ZBC-LABEL: @clmulh(
+// RV32ZBC-NEXT: entry:
+// RV32ZBC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBC-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBC-NEXT: store i32 [[B:%.*]], i32* [[B_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[TMP0]], i32 [[TMP1]])
+// RV32ZBC-NEXT: ret i32 [[TMP2]]
+//
+long clmulh(long a, long b) {
+ return __builtin_riscv_clmulh(a, b);
+}
+
+// RV32ZBC-LABEL: @clmulr(
+// RV32ZBC-NEXT: entry:
+// RV32ZBC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBC-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBC-NEXT: store i32 [[B:%.*]], i32* [[B_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4
+// RV32ZBC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmulr.i32(i32 [[TMP0]], i32 [[TMP1]])
+// RV32ZBC-NEXT: ret i32 [[TMP2]]
+//
+long clmulr(long a, long b) {
+ return __builtin_riscv_clmulr(a, b);
+}
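
As background for these builtins: clmul computes the low XLEN bits of the carry-less (XOR-based) product, clmulh the high bits, and clmulr the bit-reversed variant. A reference model of the low half on RV32 (an illustrative sketch of the semantics only):

#include <stdint.h>

// Reference model of clmul on RV32: XOR in a shifted copy of `a` for
// every set bit of `b`, yielding the low 32 bits of the carry-less product.
static uint32_t clmul32_ref(uint32_t a, uint32_t b) {
  uint32_t r = 0;
  for (int i = 0; i < 32; i++)
    if ((b >> i) & 1u)
      r ^= a << i;
  return r;
}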
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbr.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbr.c
new file mode 100644
index 000000000000..f698f38dd1d3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbr.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-zbr -emit-llvm %s -o - \
+// RUN: | FileCheck %s -check-prefix=RV32ZBR
+
+// RV32ZBR-LABEL: @crc32_b(
+// RV32ZBR-NEXT: entry:
+// RV32ZBR-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBR-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.crc32.b.i32(i32 [[TMP0]])
+// RV32ZBR-NEXT: ret i32 [[TMP1]]
+//
+long crc32_b(long a) {
+ return __builtin_riscv_crc32_b(a);
+}
+
+// RV32ZBR-LABEL: @crc32_h(
+// RV32ZBR-NEXT: entry:
+// RV32ZBR-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBR-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.crc32.h.i32(i32 [[TMP0]])
+// RV32ZBR-NEXT: ret i32 [[TMP1]]
+//
+long crc32_h(long a) {
+ return __builtin_riscv_crc32_h(a);
+}
+
+// RV32ZBR-LABEL: @crc32_w(
+// RV32ZBR-NEXT: entry:
+// RV32ZBR-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBR-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.crc32.w.i32(i32 [[TMP0]])
+// RV32ZBR-NEXT: ret i32 [[TMP1]]
+//
+long crc32_w(long a) {
+ return __builtin_riscv_crc32_w(a);
+}
+
+// RV32ZBR-LABEL: @crc32c_b(
+// RV32ZBR-NEXT: entry:
+// RV32ZBR-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBR-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.crc32c.b.i32(i32 [[TMP0]])
+// RV32ZBR-NEXT: ret i32 [[TMP1]]
+//
+long crc32c_b(long a) {
+ return __builtin_riscv_crc32c_b(a);
+}
+
+// RV32ZBR-LABEL: @crc32c_h(
+// RV32ZBR-NEXT: entry:
+// RV32ZBR-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBR-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.crc32c.h.i32(i32 [[TMP0]])
+// RV32ZBR-NEXT: ret i32 [[TMP1]]
+//
+long crc32c_h(long a) {
+ return __builtin_riscv_crc32c_h(a);
+}
+
+// RV32ZBR-LABEL: @crc32c_w(
+// RV32ZBR-NEXT: entry:
+// RV32ZBR-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV32ZBR-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV32ZBR-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.crc32c.w.i32(i32 [[TMP0]])
+// RV32ZBR-NEXT: ret i32 [[TMP1]]
+//
+long crc32c_w(long a) {
+ return __builtin_riscv_crc32c_w(a);
+}
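
The draft Zbr crc32.* and crc32c.* instructions advance a CRC-32 or CRC-32C accumulator over the low 8, 16, 32, or 64 bits of the source. A rough reference model of crc32.b, assuming the draft bitmanip specification's definition in terms of the reflected polynomial 0xEDB88320 (0x82F63B78 for the crc32c.* forms):

#include <stdint.h>

// Rough reference model of crc32.b on RV32, assuming the draft Zbr
// semantics: eight rounds of the reflected CRC-32 step.
static uint32_t crc32_b_ref(uint32_t x) {
  for (int i = 0; i < 8; i++)
    x = (x >> 1) ^ (0xEDB88320u & (0u - (x & 1u)));
  return x;
}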
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbb.c
new file mode 100644
index 000000000000..1fa3b01bb7ca
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbb.c
@@ -0,0 +1,27 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zbb -emit-llvm %s -o - \
+// RUN: | FileCheck %s -check-prefix=RV64ZBB
+
+// RV64ZBB-LABEL: @orc_b_32(
+// RV64ZBB-NEXT: entry:
+// RV64ZBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// RV64ZBB-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4
+// RV64ZBB-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// RV64ZBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.orc.b.i32(i32 [[TMP0]])
+// RV64ZBB-NEXT: ret i32 [[TMP1]]
+//
+int orc_b_32(int a) {
+ return __builtin_riscv_orc_b_32(a);
+}
+
+// RV64ZBB-LABEL: @orc_b_64(
+// RV64ZBB-NEXT: entry:
+// RV64ZBB-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBB-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBB-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBB-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.orc.b.i64(i64 [[TMP0]])
+// RV64ZBB-NEXT: ret i64 [[TMP1]]
+//
+long orc_b_64(long a) {
+ return __builtin_riscv_orc_b_64(a);
+}
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbc.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbc.c
new file mode 100644
index 000000000000..4dffc3aec9d2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbc.c
@@ -0,0 +1,48 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zbc -emit-llvm %s -o - \
+// RUN: | FileCheck %s -check-prefix=RV64ZBC
+
+// RV64ZBC-LABEL: @clmul(
+// RV64ZBC-NEXT: entry:
+// RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBC-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBC-NEXT: store i64 [[B:%.*]], i64* [[B_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, i64* [[B_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZBC-NEXT: ret i64 [[TMP2]]
+//
+long clmul(long a, long b) {
+ return __builtin_riscv_clmul(a, b);
+}
+
+// RV64ZBC-LABEL: @clmulh(
+// RV64ZBC-NEXT: entry:
+// RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBC-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBC-NEXT: store i64 [[B:%.*]], i64* [[B_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, i64* [[B_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZBC-NEXT: ret i64 [[TMP2]]
+//
+long clmulh(long a, long b) {
+ return __builtin_riscv_clmulh(a, b);
+}
+
+// RV64ZBC-LABEL: @clmulr(
+// RV64ZBC-NEXT: entry:
+// RV64ZBC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBC-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBC-NEXT: store i64 [[B:%.*]], i64* [[B_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP1:%.*]] = load i64, i64* [[B_ADDR]], align 8
+// RV64ZBC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmulr.i64(i64 [[TMP0]], i64 [[TMP1]])
+// RV64ZBC-NEXT: ret i64 [[TMP2]]
+//
+long clmulr(long a, long b) {
+ return __builtin_riscv_clmulr(a, b);
+}
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbr.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbr.c
new file mode 100644
index 000000000000..5f9453b88add
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbr.c
@@ -0,0 +1,99 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zbr -emit-llvm %s -o - \
+// RUN: | FileCheck %s -check-prefix=RV64ZBR
+
+// RV64ZBR-LABEL: @crc32_b(
+// RV64ZBR-NEXT: entry:
+// RV64ZBR-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBR-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.crc32.b.i64(i64 [[TMP0]])
+// RV64ZBR-NEXT: ret i64 [[TMP1]]
+//
+long crc32_b(long a) {
+ return __builtin_riscv_crc32_b(a);
+}
+
+// RV64ZBR-LABEL: @crc32_h(
+// RV64ZBR-NEXT: entry:
+// RV64ZBR-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBR-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.crc32.h.i64(i64 [[TMP0]])
+// RV64ZBR-NEXT: ret i64 [[TMP1]]
+//
+long crc32_h(long a) {
+ return __builtin_riscv_crc32_h(a);
+}
+
+// RV64ZBR-LABEL: @crc32_w(
+// RV64ZBR-NEXT: entry:
+// RV64ZBR-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBR-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.crc32.w.i64(i64 [[TMP0]])
+// RV64ZBR-NEXT: ret i64 [[TMP1]]
+//
+long crc32_w(long a) {
+ return __builtin_riscv_crc32_w(a);
+}
+
+// RV64ZBR-LABEL: @crc32c_b(
+// RV64ZBR-NEXT: entry:
+// RV64ZBR-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBR-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.crc32c.b.i64(i64 [[TMP0]])
+// RV64ZBR-NEXT: ret i64 [[TMP1]]
+//
+long crc32c_b(long a) {
+ return __builtin_riscv_crc32c_b(a);
+}
+
+// RV64ZBR-LABEL: @crc32c_h(
+// RV64ZBR-NEXT: entry:
+// RV64ZBR-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBR-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.crc32c.h.i64(i64 [[TMP0]])
+// RV64ZBR-NEXT: ret i64 [[TMP1]]
+//
+long crc32c_h(long a) {
+ return __builtin_riscv_crc32c_h(a);
+}
+
+// RV64ZBR-LABEL: @crc32c_w(
+// RV64ZBR-NEXT: entry:
+// RV64ZBR-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBR-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.crc32c.w.i64(i64 [[TMP0]])
+// RV64ZBR-NEXT: ret i64 [[TMP1]]
+//
+long crc32c_w(long a) {
+ return __builtin_riscv_crc32c_w(a);
+}
+
+// RV64ZBR-LABEL: @crc32_d(
+// RV64ZBR-NEXT: entry:
+// RV64ZBR-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBR-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.crc32.d.i64(i64 [[TMP0]])
+// RV64ZBR-NEXT: ret i64 [[TMP1]]
+//
+long crc32_d(long a) {
+ return __builtin_riscv_crc32_d(a);
+}
+
+// RV64ZBR-LABEL: @crc32c_d(
+// RV64ZBR-NEXT: entry:
+// RV64ZBR-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// RV64ZBR-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// RV64ZBR-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.crc32c.d.i64(i64 [[TMP0]])
+// RV64ZBR-NEXT: ret i64 [[TMP1]]
+//
+long crc32c_d(long a) {
+ return __builtin_riscv_crc32c_d(a);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
new file mode 100644
index 000000000000..179028804e07
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vaadd(op1, op2, vl);
+}
+
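+// The vaaddu tests below mirror the signed vaadd tests above for the
+// unsigned averaging add; the overloaded vaaddu(op1, op2, vl) call resolves
+// on its operand types, so only the intrinsic name and signedness change.
+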
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vaaddu(op1, op2, vl);
+}
+
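+// Masked (_m) overloads prepend mask and maskedoff operands, i.e.
+// vaadd(mask, maskedoff, op1, op2, vl), and are expected to lower to the
+// corresponding llvm.riscv.vaadd.mask.* intrinsics checked below.
+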
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vaadd(mask, maskedoff, op1, op2, vl);
+}
+
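+// The remaining tests cover vaaddu, the unsigned counterpart of vaadd. Both
+// perform an averaging add, roughly (op1 + op2) >> 1, with rounding taken
+// from the vxrm CSR per the RVV specification. A minimal usage sketch
+// (hypothetical helper name; same overloaded masked form used throughout
+// this file, assuming <riscv_vector.h>):
+//
+//   vuint8m1_t avg_u8m1_masked(vbool8_t m, vuint8m1_t off,
+//                              vuint8m1_t a, vuint8m1_t b, size_t vl) {
+//     // inactive lanes take their values from off (the maskedoff operand)
+//     return vaaddu(m, off, a, b, vl);
+//   }
+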
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vaaddu(mask, maskedoff, op1, op2, vl);
+}
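
The masked vaaddu tests above and the vadc tests in the next file follow the same pattern: each C-level call through the overloaded name must lower to the matching `llvm.riscv.*` intrinsic for both the RV32 and RV64 triples. As an illustrative sketch only, not part of this diff, the snippet below shows how a caller would reach the same masked u32m1 variant these checks pin down; the function name `average_masked` is hypothetical, and the semantics comments are a rough description (vaaddu averages the operands with rounding controlled by vxrm; vadc adds a per-element carry-in mask), not text from the tests themselves.

// Illustrative sketch, not part of this commit. Assumes the same
// <riscv_vector.h> overloads exercised by the tests in this diff.
//
// vaaddu: roughly (op1[i] + op2[i]) >> 1, rounding per vxrm; masked-off
//         elements take their value from maskedoff.
// vadc:   op1[i] + op2[i] + carryin[i].
#include <riscv_vector.h>

vuint32m1_t average_masked(vbool32_t mask, vuint32m1_t maskedoff,
                           vuint32m1_t a, vuint32m1_t b, size_t vl) {
  // Overload resolution on the argument types selects the u32m1 masked
  // variant, i.e. the llvm.riscv.vaaddu.mask.nxv2i32.* call checked above.
  return vaaddu(mask, maskedoff, a, b, vl);
}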
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
new file mode 100644
index 000000000000..8e6c592a2595
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadc.c
@@ -0,0 +1,1328 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc(op1, op2, carryin, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
index 33073587e74c..fc783e7bdac7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
@@ -1,14 +1,14 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
-// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
-#include <riscv_vector_generic.h>
+#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vadd_vv_i8mf8(
// CHECK-RV32-NEXT: entry:
@@ -1253,7 +1253,7 @@ vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i8mf8_m(
@@ -1267,7 +1267,7 @@ vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i8mf4_m(
@@ -1281,7 +1281,7 @@ vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i8mf4_m(
@@ -1295,7 +1295,7 @@ vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i8mf2_m(
@@ -1309,7 +1309,7 @@ vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i8mf2_m(
@@ -1323,7 +1323,7 @@ vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i8m1_m(
@@ -1337,7 +1337,7 @@ vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i8m1_m(
@@ -1351,7 +1351,7 @@ vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i8m2_m(
@@ -1365,7 +1365,7 @@ vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i8m2_m(
@@ -1379,7 +1379,7 @@ vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i8m4_m(
@@ -1393,7 +1393,7 @@ vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i8m4_m(
@@ -1407,7 +1407,7 @@ vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i8m8_m(
@@ -1421,7 +1421,7 @@ vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i8m8_m(
@@ -1435,7 +1435,7 @@ vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i16mf4_m(
@@ -1449,7 +1449,7 @@ vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i16mf4_m(
@@ -1463,7 +1463,7 @@ vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i16mf2_m(
@@ -1477,7 +1477,7 @@ vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i16mf2_m(
@@ -1491,7 +1491,7 @@ vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i16m1_m(
@@ -1505,7 +1505,7 @@ vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i16m1_m(
@@ -1519,7 +1519,7 @@ vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i16m2_m(
@@ -1533,7 +1533,7 @@ vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i16m2_m(
@@ -1547,7 +1547,7 @@ vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i16m4_m(
@@ -1561,7 +1561,7 @@ vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i16m4_m(
@@ -1575,7 +1575,7 @@ vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i16m8_m(
@@ -1589,7 +1589,7 @@ vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i16m8_m(
@@ -1603,7 +1603,7 @@ vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i32mf2_m(
@@ -1617,7 +1617,7 @@ vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i32mf2_m(
@@ -1631,7 +1631,7 @@ vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i32m1_m(
@@ -1645,7 +1645,7 @@ vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i32m1_m(
@@ -1659,7 +1659,7 @@ vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i32m2_m(
@@ -1673,7 +1673,7 @@ vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i32m2_m(
@@ -1687,7 +1687,7 @@ vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i32m4_m(
@@ -1701,7 +1701,7 @@ vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i32m4_m(
@@ -1715,7 +1715,7 @@ vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i32m8_m(
@@ -1729,7 +1729,7 @@ vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i32m8_m(
@@ -1743,7 +1743,7 @@ vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i64m1_m(
@@ -1757,7 +1757,7 @@ vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i64m1_m(
@@ -1771,7 +1771,7 @@ vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i64m2_m(
@@ -1785,7 +1785,7 @@ vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i64m2_m(
@@ -1799,7 +1799,7 @@ vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i64m4_m(
@@ -1813,7 +1813,7 @@ vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i64m4_m(
@@ -1827,7 +1827,7 @@ vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_i64m8_m(
@@ -1841,7 +1841,7 @@ vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_i64m8_m(
@@ -1855,7 +1855,7 @@ vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u8mf8_m(
@@ -1869,7 +1869,7 @@ vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u8mf8_m(
@@ -1883,7 +1883,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u8mf4_m(
@@ -1897,7 +1897,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u8mf4_m(
@@ -1911,7 +1911,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u8mf2_m(
@@ -1925,7 +1925,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u8mf2_m(
@@ -1939,7 +1939,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u8m1_m(
@@ -1953,7 +1953,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u8m1_m(
@@ -1967,7 +1967,7 @@ vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u8m2_m(
@@ -1981,7 +1981,7 @@ vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u8m2_m(
@@ -1995,7 +1995,7 @@ vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u8m4_m(
@@ -2009,7 +2009,7 @@ vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u8m4_m(
@@ -2023,7 +2023,7 @@ vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u8m8_m(
@@ -2037,7 +2037,7 @@ vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u8m8_m(
@@ -2051,7 +2051,7 @@ vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u16mf4_m(
@@ -2065,7 +2065,7 @@ vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u16mf4_m(
@@ -2079,7 +2079,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u16mf2_m(
@@ -2093,7 +2093,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u16mf2_m(
@@ -2107,7 +2107,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u16m1_m(
@@ -2121,7 +2121,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u16m1_m(
@@ -2135,7 +2135,7 @@ vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u16m2_m(
@@ -2149,7 +2149,7 @@ vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u16m2_m(
@@ -2163,7 +2163,7 @@ vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u16m4_m(
@@ -2177,7 +2177,7 @@ vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u16m4_m(
@@ -2191,7 +2191,7 @@ vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u16m8_m(
@@ -2205,7 +2205,7 @@ vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u16m8_m(
@@ -2219,7 +2219,7 @@ vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u32mf2_m(
@@ -2233,7 +2233,7 @@ vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u32mf2_m(
@@ -2247,7 +2247,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u32m1_m(
@@ -2261,7 +2261,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u32m1_m(
@@ -2275,7 +2275,7 @@ vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u32m2_m(
@@ -2289,7 +2289,7 @@ vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u32m2_m(
@@ -2303,7 +2303,7 @@ vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u32m4_m(
@@ -2317,7 +2317,7 @@ vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u32m4_m(
@@ -2331,7 +2331,7 @@ vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u32m8_m(
@@ -2345,7 +2345,7 @@ vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u32m8_m(
@@ -2359,7 +2359,7 @@ vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u64m1_m(
@@ -2373,7 +2373,7 @@ vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u64m1_m(
@@ -2387,7 +2387,7 @@ vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u64m2_m(
@@ -2401,7 +2401,7 @@ vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u64m2_m(
@@ -2415,7 +2415,7 @@ vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u64m4_m(
@@ -2429,7 +2429,7 @@ vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u64m4_m(
@@ -2443,7 +2443,7 @@ vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vv_u64m8_m(
@@ -2457,7 +2457,7 @@ vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vadd_vx_u64m8_m(
@@ -2471,6 +2471,6 @@ vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vadd_m(mask, maskedoff, op1, op2, vl);
+ return vadd(mask, maskedoff, op1, op2, vl);
}
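The tests above rely on C overload resolution: vadd(op1, op2, vl) lowers to the vv or vx intrinsic depending on whether op2 is a vector or a scalar, and prepending a mask plus a maskedoff operand selects the masked (_m) variant, whose inactive elements are taken from maskedoff. A minimal sketch of both call shapes, assuming the same <riscv_vector.h> header and +experimental-v toolchain as the RUN lines (add_all and add_masked are illustrative names, not taken from the tests):

#include <riscv_vector.h>

// Unmasked: the operand types alone select vadd_vv_i32m1.
vint32m1_t add_all(vint32m1_t a, vint32m1_t b, size_t vl) {
  return vadd(a, b, vl);
}

// Masked: the leading vbool32_t mask and the maskedoff operand select the
// _m form; elements where the mask is clear come from maskedoff.
vint32m1_t add_masked(vbool32_t m, vint32m1_t maskedoff,
                      vint32m1_t a, vint32m1_t b, size_t vl) {
  return vadd(m, maskedoff, a, b, vl);
}

The new vand.c file below exercises the same unmasked vv and vx shapes across every integer element width and LMUL.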
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
new file mode 100644
index 000000000000..1a3d0a060c01
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vand(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
new file mode 100644
index 000000000000..ac623811db8a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vasub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vasubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vasub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vasubu(mask, maskedoff, op1, op2, vl);
+}
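
For reference, a minimal sketch of how the overloaded masked intrinsics exercised above are called from user code (illustrative only, not part of the generated checks; the helper name is hypothetical, and it assumes a toolchain with the experimental V extension enabled). The argument types alone select the overload, so the call below resolves to the same vasubu_vv_u32m1_m case tested in this file:

#include <riscv_vector.h>

// Averaging unsigned subtract, applied only where mask is set;
// inactive lanes take their values from maskedoff. Overload
// resolution on (vbool32_t, vuint32m1_t, vuint32m1_t, vuint32m1_t,
// size_t) picks the u32m1 masked vector-vector form.
vuint32m1_t masked_avg_sub(vbool32_t mask, vuint32m1_t maskedoff,
                           vuint32m1_t a, vuint32m1_t b, size_t vl) {
  return vasubu(mask, maskedoff, a, b, vl);
}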
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
new file mode 100644
index 000000000000..3a412c0257c4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vdivu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vdivu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
index d0389a333686..25fc2fe4e101 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-generic/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -1,14 +1,14 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
-// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
-#include <riscv_vector_generic.h>
+#include <riscv_vector.h>
// CHECK-RV32-LABEL: @test_vfadd_vv_f32mf2(
// CHECK-RV32-NEXT: entry:
@@ -273,7 +273,7 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f32mf2_m(
@@ -287,7 +287,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vv_f32m1_m(
@@ -301,7 +301,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vf
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f32m1_m(
@@ -315,7 +315,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vv_f32m2_m(
@@ -329,7 +329,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f32m2_m(
@@ -343,7 +343,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vv_f32m4_m(
@@ -357,7 +357,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f32m4_m(
@@ -371,7 +371,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vv_f32m8_m(
@@ -385,7 +385,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f32m8_m(
@@ -399,7 +399,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vv_f64m1_m(
@@ -413,7 +413,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f64m1_m(
@@ -427,7 +427,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vv_f64m2_m(
@@ -441,7 +441,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f64m2_m(
@@ -455,7 +455,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vv_f64m4_m(
@@ -469,7 +469,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f64m4_m(
@@ -483,7 +483,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vv_f64m8_m(
@@ -497,7 +497,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloa
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV32-LABEL: @test_vfadd_vf_f64m8_m(
@@ -511,6 +511,6 @@ vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
- return vfadd_m(mask, maskedoff, op1, op2, vl);
+ return vfadd(mask, maskedoff, op1, op2, vl);
}
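The hunks above finish moving the masked tests from the `vfadd_m` spelling to the plain overloaded `vfadd`: the header resolves the call from the argument list, so one name now covers the vector-vector, vector-scalar, unmasked, and masked forms. A minimal sketch of both call shapes, mirroring the tests above (the wrapper names are illustrative only, not part of the patch):

#include <riscv_vector.h>

// Unmasked: two vector operands resolve to the old vfadd_vv_f32m1.
vfloat32m1_t add_all(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return vfadd(a, b, vl);
}

// Masked: a leading (mask, maskedoff) pair resolves to the old
// vfadd_vv_f32m1_m; inactive lanes are taken from maskedoff.
vfloat32m1_t add_masked(vbool32_t mask, vfloat32m1_t maskedoff,
                        vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return vfadd(mask, maskedoff, a, b, vl);
}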
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
new file mode 100644
index 000000000000..77e9feed037b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S 2>&1 >/dev/null | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
+ return vfclass(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vfloat32m1_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vfloat32m2_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat32m4_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vfloat32m8_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vfloat64m1_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vfloat64m2_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vfloat64m4_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat64m8_t op1, size_t vl) {
+ return vfclass(mask, maskedoff, op1, vl);
+}
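vfclass.v yields a per-element classification bitmask (in the V spec the ten fclass categories occupy bits 0-9; for example, bits 8 and 9 flag signaling and quiet NaNs), and the overloaded spelling drops the type suffix exactly as vfadd does. A short sketch of the two call shapes exercised above, again with illustrative wrapper names:

#include <riscv_vector.h>

// Unmasked: was vfclass_v_u32m1.
vuint32m1_t classify(vfloat32m1_t v, size_t vl) {
  return vfclass(v, vl);
}

// Masked: was vfclass_v_u32m1_m; lanes where mask is clear keep
// maskedoff's value.
vuint32m1_t classify_active(vbool32_t mask, vuint32m1_t maskedoff,
                            vfloat32m1_t v, size_t vl) {
  return vfclass(mask, maskedoff, v, vl);
}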
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
new file mode 100644
index 000000000000..640eebf31e5c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
@@ -0,0 +1,1579 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S 2>&1 >/dev/null | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
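+
+// Note: unlike vfadd or vfclass, the conversions keep a short
+// _x / _xu / _f (and _rtz_*) infix even in the overloaded spelling.
+// From a float source the result may be signed or unsigned and may use
+// the current rounding mode or round-to-zero; the argument types cannot
+// select among those, so that much of the name has to stay.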
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
+ return vfcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
+ return vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
+ return vfcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
+ return vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
+ return vfcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask,
+ vuint32mf2_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vint32mf2_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vint32m1_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vint32m2_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vint32m4_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vint32m8_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vuint32mf2_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vuint32m1_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vuint32m2_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vuint32m4_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vuint32m8_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vint64m1_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vint64m2_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vint64m4_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vint64m8_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vuint64m1_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vuint64m2_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vuint64m4_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vuint64m8_t src, size_t vl) {
+ return vfcvt_f(mask, maskedoff, src, vl);
+}
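The masked tests above all exercise one calling convention: the overloaded intrinsic drops the explicit type suffix, and the correct nxv*/LMUL variant is resolved from the operand types, with inactive lanes taken from the maskedoff argument. As a minimal editorial sketch of how user code combines these overloads (not part of the test file; trunc_masked_f64m1 is an illustrative name, and the unmasked vfcvt_rtz_x overload is assumed from earlier in vfcvt.c):

#include <riscv_vector.h>

// Truncate toward zero the lanes of `src` selected by `mask`; lanes where
// the mask is clear keep their value from `maskedoff`. Illustrative helper,
// not taken from the test file above.
vfloat64m1_t trunc_masked_f64m1(vbool64_t mask, vfloat64m1_t maskedoff,
                                vfloat64m1_t src, size_t vl) {
  // Unmasked overload (assumed): supplies the integer-typed maskedoff
  // operand that the masked form below requires.
  vint64m1_t i_off = vfcvt_rtz_x(maskedoff, vl);
  // Masked vfcvt.rtz.x.f.v, the same overload checked in
  // test_vfcvt_rtz_x_f_v_i64m1_m: active lanes become (int64)src.
  vint64m1_t i = vfcvt_rtz_x(mask, i_off, src, vl);
  // Masked vfcvt.f.x.v back to double, as in test_vfcvt_f_x_v_f64m1_m;
  // inactive lanes are restored directly from maskedoff.
  return vfcvt_f(mask, maskedoff, i, vl);
}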
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
new file mode 100644
index 000000000000..c38fdbea7cbb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfdiv.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfdiv(mask, maskedoff, op1, op2, vl);
+}
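
The vfdiv tests above exercise both call shapes of the overloaded intrinsic: the unmasked form vfdiv(op1, op2, vl) and the masked form vfdiv(mask, maskedoff, op1, op2, vl), with the vector-scalar (vf) overloads resolved purely by argument types. A minimal caller-side sketch of the same overloads (hypothetical helpers, not part of the test file) would look like:

    #include <riscv_vector.h>

    // Unmasked: divide every element of a by the scalar s.
    vfloat32m1_t scale_down(vfloat32m1_t a, float s, size_t vl) {
      return vfdiv(a, s, vl); // resolves to the f32m1 vf overload tested above
    }

    // Masked: inactive lanes take their value from maskedoff.
    vfloat32m1_t scale_down_m(vbool32_t m, vfloat32m1_t maskedoff,
                              vfloat32m1_t a, float s, size_t vl) {
      return vfdiv(m, maskedoff, a, s, vl);
    }
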
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
new file mode 100644
index 000000000000..4cfa5df5e08f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfirst.c
@@ -0,0 +1,190 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b16(vbool16_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b32(vbool32_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b64(vbool64_t op1, size_t vl) { return vfirst(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vfirst(mask, op1, vl);
+}
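
Unlike the arithmetic intrinsics, vfirst returns an XLEN-sized scalar rather than a vector, which is why the checks above expect i32 on RV32 and i64 on RV64. Per the V specification, vfirst.m yields the index of the first set mask bit, or -1 when none is set within vl. A minimal sketch using only the overload tested above (hypothetical helper, assuming the spec's -1-on-empty convention):

    #include <riscv_vector.h>

    // True when at least one mask bit is set in the first vl elements.
    static inline _Bool any_set(vbool32_t m, size_t vl) {
      return vfirst(m, vl) >= 0; // vfirst: first set index, or -1 if none
    }
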
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
new file mode 100644
index 000000000000..130f599defe1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmacc(mask, acc, op1, op2, vl);
+}
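// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): the vfmacc tests above exercise the
// overloaded C intrinsic, which lowers to @llvm.riscv.vfmacc(.mask) as the
// CHECK lines show. Below is a minimal usage sketch; the helper name and its
// parameters are hypothetical, while the intrinsic and type names are taken
// from these tests. vfmacc computes acc = op1 * op2 + acc elementwise, and
// the masked overload takes the mask as its first argument, as the _m tests
// demonstrate.

#include <riscv_vector.h>

// y[i] += a * x[i], processed in hardware-sized strips.
void axpy_f32(size_t n, float a, const float *x, float *y) {
  for (size_t vl; n > 0; n -= vl, x += vl, y += vl) {
    vl = vsetvl_e32m1(n);                    // elements handled this strip
    vfloat32m1_t vx = vle32_v_f32m1(x, vl);  // load x strip
    vfloat32m1_t vy = vle32_v_f32m1(y, vl);  // load y strip (the accumulator)
    vy = vfmacc(vy, a, vx, vl);              // vy = a * vx + vy (vf overload)
    vse32_v_f32m1(y, vy, vl);                // store result
  }
}
// ---------------------------------------------------------------------------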
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
new file mode 100644
index 000000000000..fbc3a421e62b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmadd(mask, acc, op1, op2, vl);
+}
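// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): the vfmadd tests are structurally
// identical to the vfmacc ones; only the operand roles differ. Per the RVV
// spec, vfmacc overwrites the addend (acc = op1 * op2 + acc) while vfmadd
// overwrites a multiplicand (acc = acc * op1 + op2). A short sketch of the
// equivalence follows; the helper name and variables are hypothetical, and
// the overloaded intrinsic names come from these tests (vfmax from the next
// file).

#include <riscv_vector.h>

vfloat32m1_t fma_two_ways(vfloat32m1_t a, vfloat32m1_t b, vfloat32m1_t c,
                          size_t vl) {
  vfloat32m1_t r1 = vfmacc(c, a, b, vl); // r1 = a * b + c; in assembly, c's
                                         // register would be overwritten
  vfloat32m1_t r2 = vfmadd(a, b, c, vl); // r2 = a * b + c; here a's register
                                         // would be overwritten instead
  return vfmax(r1, r2, vl);              // both fused, identical results;
                                         // the max of equal values
}
// ---------------------------------------------------------------------------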
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
new file mode 100644
index 000000000000..a77883f52892
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmax(mask, maskedoff, op1, op2, vl);
+}
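
[Editor's note, not part of the patch: the vfmax tests above exercise the two call shapes of the overloaded intrinsic, unmasked (op1, op2, vl) and masked (mask, maskedoff, op1, op2, vl). A minimal usage sketch follows; the helper names are hypothetical and assume a toolchain providing <riscv_vector.h> with the experimental V extension, as in the RUN lines above.]

    #include <riscv_vector.h>

    // Unmasked vector-scalar form: element-wise max of v against a scalar
    // lower bound; overload resolution picks the vf variant, as in the
    // test_vfmax_vf_* checks above.
    vfloat32m1_t clamp_lower(vfloat32m1_t v, float lower_bound, size_t vl) {
      return vfmax(v, lower_bound, vl);
    }

    // Masked form: where the mask bit is clear, the result element is taken
    // from maskedoff, matching the *_m tests above.
    vfloat32m1_t clamp_lower_m(vbool32_t mask, vfloat32m1_t maskedoff,
                               vfloat32m1_t v, float lower_bound, size_t vl) {
      return vfmax(mask, maskedoff, v, lower_bound, vl);
    }
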
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
new file mode 100644
index 000000000000..d1c5a308fdfe
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c
@@ -0,0 +1,146 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
+ float op2, size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
+ size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
+ size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
+ size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
+ size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
+ double op2, size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
+ double op2, size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
+ double op2, size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
+ size_t vl) {
+ return vfmerge(mask, op1, op2, vl);
+}
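
[Editor's note, not part of the patch: the vfmerge.vfm checks above verify the overloaded (mask, vector, scalar, vl) call shape. Per the RVV spec, vfmerge selects the scalar where the mask bit is set and the vector element otherwise. A sketch, with an illustrative helper name:]

    #include <riscv_vector.h>

    // result[i] = mask[i] ? op2 : op1[i]
    // Mirrors the call shape checked by the tests above.
    vfloat64m1_t select_scalar(vbool64_t mask, vfloat64m1_t op1, double op2,
                               size_t vl) {
      return vfmerge(mask, op1, op2, vl);
    }
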
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
new file mode 100644
index 000000000000..c471c8451b1e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmin(mask, maskedoff, op1, op2, vl);
+}
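
As context for the vfmin tests above: in application code these overloaded intrinsics compose with vsetvl-based strip-mining. A minimal sketch under the v0.10-era API assumed by this patch; the helper name cap_f64 and the loop shape are illustrative and not part of the patch, and the load keeps its explicit name because a return type alone cannot drive overload resolution:

    #include <riscv_vector.h>
    #include <stddef.h>

    // Cap each element of src[] at a scalar bound using the overloaded vfmin,
    // strip-mined over n elements. vfmin(v, bound, vl) resolves to the
    // vector-scalar form vfmin_vf_f64m1 from its argument types; the store
    // vse64(dst, ..., vl) likewise resolves from the value's type.
    void cap_f64(double *dst, const double *src, double bound, size_t n) {
      for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
        vl = vsetvl_e64m1(n);                    // elements handled this pass
        vfloat64m1_t v = vle64_v_f64m1(src, vl); // explicit name: load
        vse64(dst, vfmin(v, bound, vl), vl);     // overloaded min and store
      }
    }
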
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
new file mode 100644
index 000000000000..ad19ae848f6a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsac(mask, acc, op1, op2, vl);
+}
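
The vfmsac tests above exercise one overloaded name across four underlying intrinsics (vector-vector and vector-scalar, unmasked and masked), dispatched by whether op1 is a vector or a scalar and by the presence of a leading mask. A minimal sketch of the same dispatch, using only call forms that appear in the tests; the wrapper name msac_demo is illustrative:

    #include <riscv_vector.h>
    #include <stddef.h>

    // vfmsac computes acc = op1*op2 - acc. One overloaded name covers all
    // four variants; the resolved intrinsic is noted on each line.
    vfloat64m1_t msac_demo(vbool64_t m, vfloat64m1_t acc,
                           vfloat64m1_t v1, vfloat64m1_t v2,
                           double s, size_t vl) {
      acc = vfmsac(acc, v1, v2, vl);      // unmasked vv -> vfmsac_vv_f64m1
      acc = vfmsac(acc, s, v2, vl);       // unmasked vf -> vfmsac_vf_f64m1
      return vfmsac(m, acc, v1, v2, vl);  // masked vv   -> vfmsac_vv_f64m1_m
    }
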
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
new file mode 100644
index 000000000000..91c393d46f63
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsub(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
new file mode 100644
index 000000000000..245231157a36
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmul(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
new file mode 100644
index 000000000000..af9394f63980
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
@@ -0,0 +1,1535 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_rod_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_rod_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_rod_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_rod_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask,
+ vint16mf4_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask,
+ vint16mf2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask,
+ vuint16mf4_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask,
+ vuint16mf2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,
+ vuint16m1_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,
+ vint32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,
+ vuint32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask,
+ vuint32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask,
+ vuint32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
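+// The tests below switch direction: vfncvt_f narrows 64-bit signed (f.x.w)
+// and unsigned (f.xu.w) integer sources to 32-bit floats. As with the
+// conversions above, the masked overload takes the mask first and fills
+// inactive lanes from the maskedoff operand.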
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vint64m1_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vint64m2_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vint64m4_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vint64m8_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vuint64m1_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vuint64m2_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vuint64m4_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vuint64m8_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
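+// Float-to-float narrowing: vfncvt_f converts f64 sources to f32 using the
+// current rounding mode, while the vfncvt_rod_f variant rounds toward odd,
+// which avoids double rounding when narrowing further in a second step.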
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_rod_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask,
+ vfloat32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_rod_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask,
+ vfloat32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_rod_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
+ vfloat32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_rod_f(mask, maskedoff, src, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
new file mode 100644
index 000000000000..816d4aff50a1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
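+// vfnmacc computes vd[i] = -(op1[i] * op2[i]) - vd[i]; the vf forms splat a
+// scalar op1 across the vector. The overloaded intrinsic dispatches on the
+// operand types, so the same vfnmacc() call covers every vv/vf and LMUL
+// combination exercised below.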
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmacc(acc, op1, op2, vl);
+}
+
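+// Masked variants: identical computation, with the mask as the first
+// argument; inactive lanes keep the value already in the accumulator.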
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ double op1, vfloat64m8_t op2, size_t vl) {
+ return vfnmacc(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
new file mode 100644
index 000000000000..7bddabb00c2a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
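+// vfnmadd computes vd[i] = -(vd[i] * op1[i]) - op2[i]; it differs from
+// vfnmacc in that the accumulator is the multiplicand and op2 is the addend,
+// with both the product and the addend negated.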
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ double op1, vfloat64m8_t op2, size_t vl) {
+ return vfnmadd(mask, acc, op1, op2, vl);
+}
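A quick usage sketch (not part of the patch itself): per the RVV spec, the
overloaded vfnmadd intrinsic exercised above computes -(acc * op1) - op2 per
element, while vfnmsac (the next file) computes acc - (op1 * op2). A minimal
strip-mined caller might look like the following; it assumes the same
pre-`__riscv_`-prefix overloaded API these tests use, and the helper name
`negated_fma_f32` is illustrative only.

#include <riscv_vector.h>
#include <stddef.h>

/* dst[i] = -(dst[i] * a[i]) - b[i], i.e. the vfnmadd.vv operation,
   processed vl elements at a time. */
void negated_fma_f32(float *dst, const float *a, const float *b, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);               /* elements this strip */
    vfloat32m1_t acc = vle32_v_f32m1(dst + i, vl);
    vfloat32m1_t op1 = vle32_v_f32m1(a + i, vl);
    vfloat32m1_t op2 = vle32_v_f32m1(b + i, vl);
    acc = vfnmadd(acc, op1, op2, vl);              /* resolves to _vv_f32m1 */
    vse32_v_f32m1(dst + i, acc, vl);
    i += vl;
  }
}

The overload picks the _vv or _vf flavor from the argument types, exactly as
the test pairs above check (vector op1 vs. scalar float/double op1).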
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
new file mode 100644
index 000000000000..dbffecdc2cc3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ double op1, vfloat64m8_t op2, size_t vl) {
+ return vfnmsac(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
new file mode 100644
index 000000000000..ae28fcdf4bcf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
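+
+// vfnmsub computes acc[i] = -(acc[i] * op1) + op2[i], with op1 either a
+// vector (vv forms) or a scalar (vf forms). Unlike vfnmsac, the accumulator
+// is a multiplicand rather than the addend.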
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ double op1, vfloat64m8_t op2, size_t vl) {
+ return vfnmsub(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
new file mode 100644
index 000000000000..46432daf711a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
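+
+// vfrdiv is reverse division: vd[i] = op2 / op1[i], with the scalar as the
+// dividend. Only vf forms exist; a vv form would just be vfdiv with the
+// operands swapped.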
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfrdiv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfrdiv(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
new file mode 100644
index 000000000000..48340499961c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
+ return vfrec7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, size_t vl) {
+ return vfrec7(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
new file mode 100644
index 000000000000..c852f998b8ab
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
new file mode 100644
index 000000000000..daf7cd77a4b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
new file mode 100644
index 000000000000..5d032716bc29
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -0,0 +1,579 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum(dst, vector, scalar, vl);
+}
+
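+// In the masked forms below, elements of `vector` whose mask bit is clear do
+// not participate in the reduction; the scalar operand still supplies the
+// initial accumulation value in element 0.
+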
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum(mask, dst, vector, scalar, vl);
+}
+
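+// vfredosum is the ordered single-width floating-point sum reduction:
+// elements are accumulated in strict element order, unlike vfredsum, whose
+// summation order is unspecified.
+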
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
new file mode 100644
index 000000000000..d24dbd6e0a87
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
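+
+// vfrsqrt7 produces a per-element reciprocal square-root estimate accurate to
+// about 7 bits of precision, hence the "7" in the mnemonic.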
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
+ return vfrsqrt7(op1, vl);
+}
+
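+// In the masked forms below, masked-off result elements are taken from
+// `maskedoff` rather than being computed.
+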
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, size_t vl) {
+ return vfrsqrt7(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
new file mode 100644
index 000000000000..9a69688bd937
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
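+
+// vfrsub ("reverse subtract") computes op2 - op1[i], i.e. the scalar operand
+// minus each vector element, rather than op1[i] - op2.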
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfrsub(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
new file mode 100644
index 000000000000..296d97f3f703
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c
@@ -0,0 +1,1633 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
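+
+// The vfsgnj family implements the V-extension floating-point sign-injection
+// instructions: vfsgnj gives each result element the magnitude of op1[i] and
+// the sign bit of op2[i] (copysign semantics); vfsgnjn injects the negated
+// sign of op2[i]; vfsgnjx sets the sign to the XOR of the two sign bits.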
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnj(op1, op2, vl);
+}
+
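+// vfsgnjn: sign injection with the negated sign of the second operand, i.e.
+// result[i] = copysign(op1[i], -op2[i]).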
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnjn(op1, op2, vl);
+}
+
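+// vfsgnjx: the result sign bit is the XOR of the sign bits of op1[i] and
+// op2[i]; the magnitude comes from op1[i].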
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnjx(op1, op2, vl);
+}
+
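+// Masked variants: active elements (mask[i] set) are computed as above, while
+// inactive elements are taken from maskedoff, the merge operand passed first
+// to the llvm.riscv.vfsgnj.mask intrinsics checked below.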
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnj(mask, maskedoff, op1, op2, vl);
+}
+
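+// vfsgnjn: per the RVV spec, each result element takes op1's magnitude and
+// the *negated* sign of op2 (roughly copysign(op1[i], -op2[i]) per element);
+// vfsgnj above uses op2's sign unchanged. In the masked (_m) overloads,
+// elements whose mask bit is clear are taken from maskedoff.
+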
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnjn(mask, maskedoff, op1, op2, vl);
+}
+
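+// vfsgnjx: each result element keeps op1's magnitude with a sign bit equal
+// to the XOR of op1's and op2's sign bits; vfsgnjx(v, v, vl) is the usual
+// vector-fabs idiom.
+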
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnjx(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
new file mode 100644
index 000000000000..60847f48ff93
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c
@@ -0,0 +1,296 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
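+// vfslide1down.vf slides each element of src down one position
+// (result[i] = src[i + 1] for i < vl - 1) and writes the scalar value into
+// the last active element, index vl - 1.
+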
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i32(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i32(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i32(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i32(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i32(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i32(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i32(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i32(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i32(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
+ size_t vl) {
+ return vfslide1down(src, value, vl);
+}
+
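+// Masked overloads follow: elements whose mask bit is clear are merged in
+// from maskedoff instead of being computed.
+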
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat32mf2_t src, float value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask,
+ vfloat32m1_t maskedoff,
+ vfloat32m1_t src, float value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask,
+ vfloat32m2_t maskedoff,
+ vfloat32m2_t src, float value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t src, float value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t src, float value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask,
+ vfloat64m1_t maskedoff,
+ vfloat64m1_t src, double value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask,
+ vfloat64m2_t maskedoff,
+ vfloat64m2_t src, double value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask,
+ vfloat64m4_t maskedoff,
+ vfloat64m4_t src, double value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t src, double value,
+ size_t vl) {
+ return vfslide1down(mask, maskedoff, src, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
new file mode 100644
index 000000000000..81e8d33d9780
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
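+
+// vfslide1up shifts every element of `src` up by one lane and inserts the
+// scalar `value` at element 0 (res[0] = value, res[i] = src[i-1] for
+// 0 < i < vl). A minimal usage sketch with illustrative operands, not part
+// of the autogenerated checks:
+//
+//   vfloat32m1_t res = vfslide1up(src, 1.0f, vl);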
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i32(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i32(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i32(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i32(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i32(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i32(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i32(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i32(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i32(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
+ size_t vl) {
+ return vfslide1up(src, value, vl);
+}
+
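+// The _m overloads below additionally take a mask and a `maskedoff` vector;
+// lanes whose mask bit is clear are copied from `maskedoff` rather than
+// written by the slide. A sketch with illustrative operands:
+//
+//   // res[i] = mask[i] ? (i == 0 ? value : src[i-1]) : maskedoff[i]
+//   vfloat32m1_t res = vfslide1up(mask, maskedoff, src, 1.0f, vl);
+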
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat32mf2_t src, float value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t src, float value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t src, float value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t src, float value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t src, float value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t src, double value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t src, double value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t src, double value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t src, double value,
+ size_t vl) {
+ return vfslide1up(mask, maskedoff, src, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
new file mode 100644
index 000000000000..6d330b43d333
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
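+
+// vfsqrt computes an element-wise square root over the first `vl` lanes; the
+// masked form also takes a mask and a `maskedoff` vector that supplies the
+// inactive lanes. A minimal sketch with illustrative operands, not part of
+// the autogenerated checks:
+//
+//   vfloat32m1_t r0 = vfsqrt(op1, vl);                  // r0[i] = sqrt(op1[i])
+//   vfloat32m1_t r1 = vfsqrt(mask, maskedoff, op1, vl); // masked form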
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
+ return vfsqrt(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, size_t vl) {
+ return vfsqrt(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
new file mode 100644
index 000000000000..591e260a8d11
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
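+
+// vfsub is tested in both its vector-vector (vv) and vector-scalar (vf)
+// forms, which resolve through the same overload. A minimal sketch with
+// illustrative operands, not part of the autogenerated checks:
+//
+//   vfloat32m1_t d0 = vfsub(a, b, vl);    // d0[i] = a[i] - b[i]
+//   vfloat32m1_t d1 = vfsub(a, 2.0f, vl); // d1[i] = a[i] - 2.0f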
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsub(op1, op2, vl);
+}
+
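+// The masked vv/vf overloads below merge with `maskedoff`: lanes with a set
+// mask bit receive the subtraction result, and all other lanes are copied
+// from `maskedoff`. A sketch with illustrative operands:
+//
+//   vfloat32m1_t d = vfsub(mask, maskedoff, a, b, vl);
+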
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsub(mask, maskedoff, op1, op2, vl);
+}
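+
+// A minimal usage sketch, assuming the same <riscv_vector.h> toolchain and
+// +experimental-v flags as the RUN lines above: the overloaded masked vfsub
+// merges inactive lanes from its maskedoff operand. The helper below is
+// hypothetical, not part of the autogenerated checks.
+static inline vfloat32m1_t masked_diff(vbool32_t mask, vfloat32m1_t fallback,
+                                       vfloat32m1_t a, vfloat32m1_t b,
+                                       size_t vl) {
+  // Active lanes compute a - b; inactive lanes keep the fallback value.
+  return vfsub(mask, fallback, a, b, vl);
+}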
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
new file mode 100644
index 000000000000..e62526b45acc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
@@ -0,0 +1,491 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwadd_vf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i32(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
+ return vfwadd_wf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwadd_vf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i32(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
+ return vfwadd_wf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwadd_vf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i32(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
+ return vfwadd_wf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwadd_vf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i32(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwadd_wf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwadd_vf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, float op2, size_t vl) {
+ return vfwadd_wf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwadd_vf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, float op2, size_t vl) {
+ return vfwadd_wf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwadd_vf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, float op2, size_t vl) {
+ return vfwadd_wf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwadd_vf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwadd_wf(mask, maskedoff, op1, op2, vl);
+}
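+
+// A minimal usage sketch, assuming the same toolchain as the RUN lines above:
+// the _wv form adds an f32 operand into an f64 accumulator directly, so a
+// chained widening sum needs no separate convert step. The helper below is
+// hypothetical, not part of the autogenerated checks.
+static inline vfloat64m1_t widening_sum(vfloat64m1_t acc, vfloat32mf2_t a,
+                                        vfloat32mf2_t b, size_t vl) {
+  // acc + (double)a + (double)b, computed with two widening adds.
+  return vfwadd_wv(vfwadd_wv(acc, a, vl), b, vl);
+}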
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
new file mode 100644
index 000000000000..95ee8107fe6a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
@@ -0,0 +1,1118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
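+
+// A minimal usage sketch, assuming the same toolchain as the RUN lines above:
+// the overloaded vfwcvt_f selects signed (f.x.v) or unsigned (f.xu.v)
+// widening from the source element type. The helper below is hypothetical,
+// not part of the autogenerated checks.
+static inline vfloat32m1_t u16_to_f32(vuint16mf2_t src, size_t vl) {
+  // Unsigned 16-bit integer lanes widen to 32-bit float lanes.
+  return vfwcvt_f(src, vl);
+}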
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_x(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_rtz_x(src, vl);
+}
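+
+// A minimal usage sketch, assuming the same toolchain as the RUN lines above:
+// vfwcvt_x rounds with the dynamic frm mode, while vfwcvt_rtz_x truncates
+// toward zero like a C cast. The helper below is hypothetical, not part of
+// the autogenerated checks.
+static inline vint64m1_t trunc_f32_to_i64(vfloat32mf2_t src, size_t vl) {
+  // Lane-wise (int64_t)src with round-toward-zero, widening f32 -> i64.
+  return vfwcvt_rtz_x(src, vl);
+}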
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_f(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vint16mf4_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vint16mf2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vint16m1_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vint16m2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vint16m4_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vuint16mf4_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vuint16mf2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vuint16m1_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vuint16m2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vuint16m4_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_rtz_x(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask,
+ vuint64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask,
+ vuint64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask,
+ vuint64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_rtz_xu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vint32mf2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vint32m1_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vint32m2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vint32m4_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vuint32mf2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vuint32m1_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vuint32m2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vuint32m4_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_f(mask, maskedoff, src, vl);
+}
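
For reference only (not part of this commit): a minimal caller sketch showing how the overloaded intrinsics exercised above are used. The same vfwcvt_f name covers every source element type and LMUL, with the result type inferred from the arguments; the masked form additionally takes a mask and a maskedoff vector supplying the inactive elements. Signatures match the tests above; the helper names below are hypothetical.

#include <riscv_vector.h>

// Hypothetical helper: widen an f32 vector (LMUL=1) to f64 (LMUL=2).
// The overload resolves on the argument types, as in the tests above;
// vl bounds the number of elements processed.
static vfloat64m2_t widen_f32(vfloat32m1_t src, size_t vl) {
  return vfwcvt_f(src, vl);
}

// Hypothetical masked variant: elements whose mask bit is clear are
// taken from maskedoff rather than being converted.
static vfloat64m2_t widen_f32_masked(vbool32_t mask, vfloat64m2_t maskedoff,
                                     vfloat32m1_t src, size_t vl) {
  return vfwcvt_f(mask, maskedoff, src, vl);
}
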
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
new file mode 100644
index 000000000000..15e79f5cf249
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmacc(mask, acc, op1, op2, vl);
+}
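
The vfwmacc tests above exercise the overloaded widening fused multiply-accumulate: per the RVV spec, each pair of single-width float sources is multiplied into a double-width product and added to the double-width accumulator, i.e. acc[i] = op1[i] * op2[i] + acc[i] after widening. A rough scalar model of the unmasked form (an illustrative sketch only, not part of the test file; the helper name is invented):

    // Scalar sketch of the unmasked vfwmacc semantics: widen both
    // float operands to double, multiply, and accumulate in place.
    #include <stddef.h>

    static void vfwmacc_ref(double *acc, const float *op1,
                            const float *op2, size_t vl) {
      for (size_t i = 0; i < vl; ++i)
        acc[i] = (double)op1[i] * (double)op2[i] + acc[i];
    }

Note that the masked _m overloads pass the mask first and reuse the accumulator for inactive elements: the generated IR calls the .mask. intrinsic with no separate maskedoff operand.
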
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
new file mode 100644
index 000000000000..e2148c975281
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfwmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfwmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmsac(mask, acc, op1, op2, vl);
+}
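
vfwmsac, tested above, differs from vfwmacc only in the sign of the accumulator: the old accumulator value is subtracted from the widened product, acc[i] = op1[i] * op2[i] - acc[i]. A matching scalar sketch (illustrative only; the helper name is invented):

    // Scalar sketch of the unmasked vfwmsac semantics:
    // widening multiply, then subtract the old accumulator.
    #include <stddef.h>

    static void vfwmsac_ref(double *acc, const float *op1,
                            const float *op2, size_t vl) {
      for (size_t i = 0; i < vl; ++i)
        acc[i] = (double)op1[i] * (double)op2[i] - acc[i];
    }
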
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
new file mode 100644
index 000000000000..c10067c1f46f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
@@ -0,0 +1,251 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwmul(mask, maskedoff, op1, op2, vl);
+}
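
vfwmul, tested above, is the plain widening multiply: there is no accumulator, so its masked overloads take an explicit maskedoff vector (visible as the leading <vscale x N x double> operand of the .mask. intrinsics in the CHECK lines) that supplies the inactive elements. A scalar sketch of the unmasked form (illustrative only; the helper name is invented):

    // Scalar sketch of the unmasked vfwmul semantics: widen both
    // float operands and store the double-width product.
    #include <stddef.h>

    static void vfwmul_ref(double *dst, const float *op1,
                           const float *op2, size_t vl) {
      for (size_t i = 0; i < vl; ++i)
        dst[i] = (double)op1[i] * (double)op2[i];
    }
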
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
new file mode 100644
index 000000000000..aa13e27a6abf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwnmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfwnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfwnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwnmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ float op1, vfloat32m4_t op2, size_t vl) {
+ return vfwnmacc(mask, acc, op1, op2, vl);
+}
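
vfwnmacc, tested above, negates both the widened product and the accumulator, acc[i] = -(op1[i] * op2[i]) - acc[i], while the vfwnmsac tests that follow negate only the product, acc[i] = -(op1[i] * op2[i]) + acc[i]. A scalar sketch of the unmasked vfwnmacc form (illustrative only; the helper name is invented):

    // Scalar sketch of the unmasked vfwnmacc semantics: negate the
    // widened product and subtract the old accumulator.
    #include <stddef.h>

    static void vfwnmacc_ref(double *acc, const float *op1,
                             const float *op2, size_t vl) {
      for (size_t i = 0; i < vl; ++i)
        acc[i] = -((double)op1[i] * (double)op2[i]) - acc[i];
    }
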
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
new file mode 100644
index 000000000000..539f7d60eddd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwnmsac(acc, op1, op2, vl);
+}
+
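+// Editorial note: the _m tests below exercise the masked overloads. As the
+// CHECK lines show, the same vfwnmsac() call lowers to the
+// llvm.riscv.vfwnmsac.mask.* intrinsics, with the mask operand placed after
+// op2 and before vl in the IR call.
+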
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfwnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfwnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ float op1, vfloat32m4_t op2, size_t vl) {
+ return vfwnmsac(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
new file mode 100644
index 000000000000..e1339bf25124
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -0,0 +1,331 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
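+// Editorial note (not part of the autogenerated commit): vfwredsum is an
+// unordered widening reduction. It sums all f32 elements of `vector`,
+// widened to f64, together with element 0 of the f64m1 `scalar` operand,
+// and writes the result to element 0 of the f64m1 result; the additions may
+// be reassociated, so rounding can differ from a strictly ordered sum.
+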
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
+ vfloat32m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat32m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
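+// Editorial note: vfwredosum, tested below, is the ordered counterpart of
+// vfwredsum above. It accumulates elements strictly in element order, which
+// pins down floating-point rounding at the cost of serializing the adds.
+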
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
+ vfloat32m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat32m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
new file mode 100644
index 000000000000..d86264a73225
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -0,0 +1,491 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
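+// Editorial note (not part of the autogenerated commit): vfwsub has four
+// forms, all widening to f64 here. vv and vf subtract two single-width f32
+// operands (vector-vector and vector-scalar); wv and wf take an op1 that is
+// already double-width and subtract a single-width op2 from it, which lets
+// widening operations chain without an intermediate vfwcvt.
+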
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwsub_vf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i32(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
+ return vfwsub_wf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwsub_vf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i32(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
+ return vfwsub_wf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwsub_vf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i32(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
+ return vfwsub_wf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwsub_vf(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i32(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwsub_wf(op1, op2, vl);
+}
+
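+// Editorial note: the masked vfwsub overloads below add a maskedoff operand
+// ahead of op1; under the masking policy assumed here, it supplies the
+// result elements whose mask bit is clear. They lower to the
+// llvm.riscv.vfwsub.mask.* and llvm.riscv.vfwsub.w.mask.* intrinsics.
+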
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwsub_vf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, float op2, size_t vl) {
+ return vfwsub_wf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwsub_vf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, float op2, size_t vl) {
+ return vfwsub_wf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwsub_vf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, float op2, size_t vl) {
+ return vfwsub_wf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwsub_vf(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwsub_wf(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
new file mode 100644
index 000000000000..043924d35c1e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -0,0 +1,338 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
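+
+// vid.v writes each element's own index; the overloaded masked form below
+// takes (mask, maskedoff, vl) and is checked to lower to the masked
+// @llvm.riscv.vid.mask.* intrinsic with an XLEN-sized VL (i32 on RV32,
+// i64 on RV64).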
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ size_t vl) {
+ return vid(mask, maskedoff, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
new file mode 100644
index 000000000000..653c8ac4dbe5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
@@ -0,0 +1,343 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
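+
+// viota.m writes to each element the count of set bits among the
+// preceding elements of the source mask op1; the overloaded masked form
+// below takes (mask, maskedoff, op1, vl) and is checked to lower to the
+// masked @llvm.riscv.viota.mask.* intrinsic.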
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vbool2_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vbool1_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vbool2_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c
new file mode 100644
index 000000000000..0b313a0f5c30
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c
@@ -0,0 +1,863 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
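+
+// Masked unit-stride loads: each test bitcasts the scalar base pointer to
+// the matching scalable-vector pointer type, then calls the masked
+// @llvm.riscv.vle.mask.* intrinsic with maskedoff, mask, and vl.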
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
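+// The unsigned-element overloads below mirror the signed tests above: the same
+// vleN(mask, maskedoff, base, vl) call resolves by argument types, so only the
+// vector/element types change while the generated IR intrinsics are identical.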
+// CHECK-RV32-LABEL: @test_vle8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) {
+ return vle8(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) {
+ return vle16(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
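+// Floating-point overloads: vle32/vle64 encode only the element width, so the
+// same entry points serve f32/f64 as well as i32/i64, with the result type
+// again inferred from the maskedoff operand.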
+// CHECK-RV32-LABEL: @test_vle32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) {
+ return vle32(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) {
+ return vle64(mask, maskedoff, base, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
new file mode 100644
index 000000000000..5652eda62b25
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c
@@ -0,0 +1,6125 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
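+// vloxei{8,16,32,64} are indexed ("ordered") loads: element i is read from
+// base plus the byte offset bindex[i], with the suffix naming the index
+// element width. In the overloaded form used here the result type is inferred
+// from the pointer and index operands, so no type suffix appears at the call
+// site.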
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
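+// When the index EEW is wider than the data EEW, the index register group
+// grows by the same ratio, so each group of tests stops where the index
+// would exceed LMUL=8: vloxei64 on 8-bit data ends at i8m1 above, whose
+// index is already a full vuint64m8_t.
+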
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
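+// Note: in the u64 cases below, the index operand's LMUL scales as
+// data LMUL * (index EEW / data SEW). For 64-bit data at m1, vloxei8
+// therefore takes a vuint8mf8_t index (1 * 8/64 = mf8) and vloxei16 a
+// vuint16mf4_t index, and so on through the wider index widths.
+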
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
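+// With a 64-bit index and 64-bit data the index LMUL equals the data LMUL,
+// so the full m1..m8 range is exercised here, unlike the u32 vloxei64 cases
+// above, which stop at m4 because a larger index vector would exceed LMUL=8.
+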
+// CHECK-RV32-LABEL: @test_vloxei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
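+// The floating-point variants below follow the same pattern as the integer
+// loads above: only the element type of the base pointer and the result
+// changes, while the index operand remains an unsigned integer vector of the
+// chosen index EEW.
+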
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(base, bindex, vl);
+}
+
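+// Masked variants: the overloaded name is unchanged, but the call prepends a
+// mask and a maskedoff operand and lowers to the llvm.riscv.vloxei.mask.*
+// intrinsics. The mask type is vboolN_t with N = SEW/LMUL, e.g. vbool64_t
+// for the i8mf8 case below.
+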
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
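+// Note: the vloxei64 forms over i32 stop at LMUL=4, since an i32m8 data
+// vector would require a 64-bit index vector at EMUL=16, which the type
+// system does not provide. The i64 groups below run the full m1..m8 ladder,
+// with the index EMUL scaled by (index EEW)/(data SEW), e.g. vloxei8 over
+// i64m1 takes a vuint8mf8_t index.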
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
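+// Unsigned variants: LLVM IR integers are signless, so the IR checked below
+// is identical to that of the corresponding signed tests; only the C-level
+// vector/pointer types fed to the overloaded vloxei8/16/32/64 calls differ.
+// As before, the mask type tracks the data SEW/LMUL ratio (vuint8mf8_t
+// pairs with vbool64_t) while the index type tracks the EEW suffix.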
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
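+// When the index is wider than the data, the ladder is cut short by the
+// EMUL cap of 8: vloxei16 over u8 ends at u8m4 (vuint16m8_t index),
+// vloxei32 at u8m2, and vloxei64 at u8m1.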
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64(mask, maskedoff, base, bindex, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c
new file mode 100644
index 000000000000..75bb6bd792e1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c
@@ -0,0 +1,965 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64(mask, maskedoff, base, bstride, vl);
+}
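
The masked vlse tests above all follow one pattern: the overloaded vlse8/vlse16/vlse32/vlse64 call dispatches on the type of the maskedoff operand, bstride is a byte distance between consecutive loaded elements, and inactive lanes (mask bit clear) keep their maskedoff values. A rough usage sketch built only from the overloaded API exercised above (the function name and the row_bytes parameter are made up for illustration, not taken from the test file):

// Illustrative sketch, not part of the test file: load one column of a
// row-major float matrix, where consecutive column elements sit row_bytes
// apart in memory. Masked-off lanes keep the values from maskedoff.
vfloat32m1_t load_column_m(vbool32_t mask, vfloat32m1_t maskedoff,
                           const float *column_base, size_t row_bytes,
                           size_t vl) {
  return vlse32(mask, maskedoff, column_base, (ptrdiff_t)row_bytes, vl);
}
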
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
new file mode 100644
index 000000000000..15fa817c9e90
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
@@ -0,0 +1,6123 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
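+// The unsigned-element tests below exercise the same overload machinery: a
+// vluxei<EEW>(base, bindex, vl) call resolves on the pointee type of `base`
+// and on the index vector type, and loads element i from the address `base`
+// plus the byte offset bindex[i], for the first vl elements. A minimal caller
+// sketch (hypothetical helper name, kept as a comment):
+//
+//   vuint8m1_t gather_u8(const uint8_t *p, vuint8m1_t off, size_t vl) {
+//     return vluxei8(p, off, vl); // unordered indexed (gather) load
+//   }
+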
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
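+// The 64-bit-index forms for 8-bit elements stop at u8m1: each element needs
+// a 64-bit offset, so data at LMUL=1 already requires an LMUL=8 index
+// register group, the largest grouping the ISA provides.
+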
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
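+// Note: the vluxei<EEW> calls above are unordered-indexed (gather) loads: each
+// result element i is loaded from the byte address base + bindex[i], and the
+// index operand's LMUL follows from the data-to-index EEW ratio (e.g. a u32m1
+// result with 8-bit indexes takes a vuint8mf4_t bindex). A minimal usage
+// sketch with hypothetical values (vle8_v_u8mf4 assumed from the
+// non-overloaded intrinsic set):
+//
+//   uint32_t data[4] = {10, 20, 30, 40};
+//   uint8_t offs[4] = {0, 8, 4, 12};          // byte offsets, not element indices
+//   vuint8mf4_t idx = vle8_v_u8mf4(offs, 4);  // load the index (offset) vector
+//   vuint32m1_t v = vluxei8(data, idx, 4);    // v = {10, 30, 20, 40}
+//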
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
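+// The same overloaded vluxei<EEW> names resolve on the base pointer's element
+// type: with a double* base the calls below lower to the nxv<N>f64 variants of
+// llvm.riscv.vluxei, so no separate floating-point spelling is needed.
+//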
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
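+// The index vector's LMUL scales with the ratio of index EEW to data SEW
+// so that element counts match: for i64 data, 8-bit indices occupy
+// one-eighth the register group (e.g. vuint8mf8_t pairs with vint64m1_t
+// in the tests above).
+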
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
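+// The unsigned-element tests below lower to the same generic
+// @llvm.riscv.vluxei.mask.* intrinsics as the signed tests above;
+// signedness only affects the C-level vector types, not the emitted IR.
+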
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
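+// The cases below exercise the floating-point element types. vluxei is an
+// unordered indexed load: each active element is fetched from base plus the
+// byte offset held in bindex, while masked-off elements take their value
+// from maskedoff.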
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
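+// The same pattern repeats for 64-bit floating-point elements, indexed by
+// 8-bit through 64-bit offset vectors.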
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64(mask, maskedoff, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
new file mode 100644
index 000000000000..d8c9549ecf1c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
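+// vmacc is an integer multiply-accumulate: conceptually, acc[i] = op1[i] *
+// op2[i] + acc[i] for each active element. The overloaded vmacc() call form
+// used below dispatches on the operand types to the _vv (vector-vector) or
+// _vx (vector-scalar) variant.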
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
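+// Unsigned variants: a low-half integer multiply-add produces the same bit
+// pattern regardless of signedness, so the u* tests below are expected to
+// lower to the same llvm.riscv.vmacc.* intrinsics as the signed tests above.
+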
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc(acc, op1, op2, vl);
+}
+
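+// The masked (_m) variants below take a vbool mask as the first argument;
+// the overloaded vmacc() call then resolves to the corresponding
+// @llvm.riscv.vmacc.mask.* intrinsic. vmacc computes
+// acc[i] = op1[i] * op2[i] + acc[i], so the accumulator operand is also the
+// destination.
+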
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
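+// The unsigned masked variants lower to the same @llvm.riscv.vmacc.mask.*
+// intrinsics as the signed tests above: the low-half multiply-add is
+// sign-agnostic modulo 2^SEW, so only the C-level element types differ.
+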
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c
new file mode 100644
index 000000000000..41ac0b350325
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadc.c
@@ -0,0 +1,2566 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+                                    vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
+                                    vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+                                   size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+                                    vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
+                                    vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+                                   size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
+                                   vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
+                                   vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
+                                 vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin,
+                                 size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
+                                 vbool4_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
+                                 size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
+                                 vbool2_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin,
+                                 size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+                                    vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
+                                    vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+                                   size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
+                                   vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
+                                   vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
+                                   vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
+                                   vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
+                                 vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin,
+                                 size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
+                                 vbool4_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin,
+                                 size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
+                                   vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
+                                   vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
+                                   vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
+                                   vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
+                                   vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
+                                   vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
+                                 vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
+                                 size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
+                                   vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
+                                   vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
+                                   vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
+                                   vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
+                                   vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
+                                   vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
+ vbool1_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
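+// For reference, vmadc is typically paired with vadc when chaining multi-limb
+// additions; a minimal sketch (not part of this test, identifiers
+// hypothetical):
+//   vbool8_t carry  = vmadc(lo_a, lo_b, vl);        // carry-out of low limbs
+//   vuint8m1_t sum  = vadc(hi_a, hi_b, carry, vl);  // high limbs + carry-in
+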
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
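+// Note: for the 64-bit element tests below, the vx/vxm forms pass the scalar
+// operand as i64 even in the RV32 IR (see the CHECK-RV32 lines); how that
+// i64 scalar is materialized on RV32 is a backend detail.
+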
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmadc(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
new file mode 100644
index 000000000000..aea689906706
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
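+
+// Semantics note (illustrative; not part of the autogenerated checks): per
+// the RVV spec, vmadd overwrites the multiplicand, so each call below
+// computes acc[i] = (acc[i] * op1[i]) + op2[i], or acc[i] * op1 + op2[i]
+// for the _vx scalar forms. The CHECK lines verify that the overloaded
+// vmadd() call resolves to the type-specific llvm.riscv.vmadd intrinsic,
+// with the size_t vl operand lowered as i32 on RV32 and i64 on RV64.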
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd(acc, op1, op2, vl);
+}
+
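+// The masked (_m) variants below pass the mask as the first argument of the
+// overloaded vmadd() call. The vboolN_t mask type is chosen so that
+// N = SEW/LMUL for the element type under test (e.g. vbool64_t for i8mf8),
+// which matches the <vscale x K x i1> mask operand in the checked IR.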
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
new file mode 100644
index 000000000000..1d762565fa7b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmand.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmandnot(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
new file mode 100644
index 000000000000..388fa93a85bc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmax(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(op1, op2, vl);
+}
+
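+// The tests below exercise the masked overloads: calling vmax(mask, maskedoff,
+// op1, op2, vl) resolves to the _m builtin, which lowers to the corresponding
+// llvm.riscv.vmax.mask.* intrinsic carrying the merge (maskedoff) operand and
+// a vbool mask whose ratio matches SEW/LMUL of the element type (for example,
+// vint8mf8_t pairs with vbool64_t).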
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmax(mask, maskedoff, op1, op2, vl);
+}
+
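+// The masked unsigned overloads follow the same pattern: vmaxu(mask,
+// maskedoff, op1, op2, vl) lowers to llvm.riscv.vmaxu.mask.*, and in both the
+// vector-vector (vv) and vector-scalar (vx) forms the vl operand is XLEN-sized
+// (i32 on RV32, i64 on RV64).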
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmaxu(mask, maskedoff, op1, op2, vl);
+}
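For context, a minimal usage sketch (not part of the diff) of the overloaded masked vmaxu form these tests exercise. The helper name clamp_floor_nonzero and the strip-mining loop are illustrative assumptions, not from the patch; only vmaxu(mask, maskedoff, op1, op2, vl) itself is what the file above checks:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Raise every nonzero element of dst to at least `floor`. Zero elements act
// as sentinels: the mask deselects them, so they keep the maskedoff value.
void clamp_floor_nonzero(uint32_t *dst, size_t n, uint32_t floor) {
  size_t vl;
  for (size_t i = 0; i < n; i += vl) {
    vl = vsetvl_e32m1(n - i);                       // strip-mine by hardware VL
    vuint32m1_t v = vle32_v_u32m1(dst + i, vl);     // unit-stride load
    vbool32_t active = vmsne(v, (uint32_t)0, vl);   // mask: element != 0
    vuint32m1_t r = vmaxu(active, v, v, floor, vl); // masked vx form under test
    vse32(dst + i, r, vl);                          // overloaded store
  }
}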
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
new file mode 100644
index 000000000000..e2c78736e3ae
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmerge.c
@@ -0,0 +1,1466 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
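A minimal sketch (not part of the diff) of the overloaded vmerge form tested below: the result takes op2 where the mask is set and op1 elsewhere. The helper name relu_i32m1 is hypothetical, and it relies on the riscv_vector.h include above:

// Branchless ReLU: lanes where x < 0 merge in the scalar 0, others keep x.
vint32m1_t relu_i32m1(vint32m1_t x, size_t vl) {
  vbool32_t neg = vmslt(x, (int32_t)0, vl); // comparison produces the mask
  return vmerge(neg, x, (int32_t)0, vl);    // vxm form: scalar op2
}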
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
+ vint16mf4_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
+ vint16mf2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
+ vint32mf2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
+ vuint8mf2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
+ uint16_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
+ uint16_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
+ vuint16m8_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
+ uint32_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
+ vuint32m8_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
+ vuint64m1_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
+ vuint64m2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
+ vuint64m4_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
+ vuint64m8_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
+ size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vmerge(mask, op1, op2, vl);
+}
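
The overloaded vmerge calls above resolve entirely on argument types: vector second operands pick the vvm form, scalar ones the vxm form, and the floating-point vvm variants lower to the @llvm.riscv.vfmerge intrinsics per the autogenerated checks. A minimal usage sketch of the same overloaded API (the helper name and scenario are illustrative, not part of this patch):

#include <riscv_vector.h>

// result[i] = cond[i] ? b[i] : a[i] for one strip of vl 32-bit elements;
// vmerge(mask, op1, op2, vl) keeps op1 where the mask is clear and takes
// op2 where it is set, matching the vvm form checked above.
vint32m1_t select_i32m1(vbool32_t cond, vint32m1_t a, vint32m1_t b,
                        size_t vl) {
  return vmerge(cond, a, b, vl);
}
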
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
new file mode 100644
index 000000000000..ed73e9a55598
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfeq.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfeq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfeq(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
new file mode 100644
index 000000000000..5d90b5e72160
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfge.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
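+
+// Only the vector-scalar (vf) form appears below: the V extension defines
+// vmfge.vf but no vmfge.vv encoding, so a vector-vector >= compare is
+// normally written with the operands swapped (illustrative sketch,
+// assuming vfloat32m1_t va, vb and size_t vl are in scope):
+//
+//   vbool32_t ge = vmfle(vb, va, vl); // va >= vb, elementwise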
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfge(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfge(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
new file mode 100644
index 000000000000..8a3d1da6fc2e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfgt.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
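+
+// As with vmfge, only the vf form is exercised: vmfgt.vf exists in the V
+// extension, while a vector-vector > compare is written as vmflt with the
+// operands swapped (illustrative sketch, assuming vfloat32m1_t va, vb and
+// size_t vl are in scope):
+//
+//   vbool32_t gt = vmflt(vb, va, vl); // va > vb, elementwise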
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfgt(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
new file mode 100644
index 000000000000..d0ea18094d42
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfle.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfle(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
new file mode 100644
index 000000000000..4825bbbd664c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmflt.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmflt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmflt(mask, maskedoff, op1, op2, vl);
+}
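
A note on the pattern the masked checks above pin down: at the C level the overloaded call takes (mask, maskedoff, op1, op2, vl), while the generated @llvm.riscv.vmflt.mask.* call places maskedoff first and mask fourth; under this intrinsic model, lanes where the mask is clear keep the corresponding maskedoff bits. A minimal compile-only sketch, assuming the same riscv_vector.h overloads the tests exercise (the helper name is hypothetical):

#include <riscv_vector.h>

// Hypothetical helper: masked less-than compare on f32m1 vectors. Lanes
// where `mask` is clear keep their bits from `old`; active lanes get
// (a < b). Per the checks above, this lowers to
// @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.*.
static vbool32_t vmflt_keep_old(vbool32_t mask, vbool32_t old,
                                vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return vmflt(mask, old, a, b, vl);
}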
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
new file mode 100644
index 000000000000..4093ca0d11d0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmfne.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfne(op1, op2, vl);
+}
+
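The vv/vf pairs above differ only in the type of op2: overload resolution picks the vector-vector intrinsic (vmfne.nxv2f32.nxv2f32) for a vector operand and the vector-scalar form (vmfne.nxv2f32.f32) for a float, and the masked variants that follow add the same (mask, maskedoff, ...) prefix as the other comparison tests. A compile-only sketch of the two shapes, with hypothetical helper names:

#include <riscv_vector.h>

// Same overloaded spelling, two lowerings, selected by the type of op2.
vbool32_t ne_vv(vfloat32m1_t a, vfloat32m1_t b, size_t vl) {
  return vmfne(a, b, vl); // vector operand -> @llvm.riscv.vmfne.nxv2f32.nxv2f32
}
vbool32_t ne_vf(vfloat32m1_t a, float b, size_t vl) {
  return vmfne(a, b, vl); // scalar operand -> @llvm.riscv.vmfne.nxv2f32.f32
}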
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfne(mask, maskedoff, op1, op2, vl);
+}
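
One regularity worth calling out before the next file: the bN suffix in each test name is SEW/LMUL, which is also the N in the vboolN_t result type. For f32 at LMUL=1/2 that is 32/(1/2) = 64 (vbool64_t); for f64 at LMUL=8 it is 64/8 = 8 (vbool8_t). A compile-only sketch mirroring two of the tests above (function names are hypothetical):

#include <riscv_vector.h>

// bN = SEW/LMUL, matching the vboolN_t produced by each comparison.
vbool64_t mf2_case(vfloat32mf2_t a, vfloat32mf2_t b, size_t vl) {
  return vmfne(a, b, vl); // 32 / (1/2) = 64
}
vbool8_t m8_case(vfloat64m8_t a, vfloat64m8_t b, size_t vl) {
  return vmfne(a, b, vl); // 64 / 8 = 8
}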
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
new file mode 100644
index 000000000000..6dd3e7ad4e67
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
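
As the i8 series above shows, the vx overload takes a scalar of the element type (int8_t here) rather than a pre-splatted vector. A compile-only sketch of the idiom, with a hypothetical helper name; since vmin is the signed minimum, this caps each lane from above:

#include <riscv_vector.h>

// Hypothetical helper: clamp each lane of an i8m1 vector to at most `limit`
// via the signed-minimum vx overload; per the checks above this lowers to
// @llvm.riscv.vmin.nxv8i8.i8.*.
vint8m1_t cap_at(vint8m1_t v, int8_t limit, size_t vl) {
  return vmin(v, limit, vl);
}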
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmin(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vminu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
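+// Note: for the 64-bit element types below, the vx forms pass the int64_t
+// scalar straight through as an i64 intrinsic operand on both RV32 and
+// RV64; only the trailing vl type (.i32 vs .i64) differs between the runs.
+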
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmin(mask, maskedoff, op1, op2, vl);
+}
+
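+// vminu is the unsigned counterpart: operands compare as unsigned, so for
+// 8-bit elements 0x80 and 0x01, vminu selects 0x01 while signed vmin
+// selects 0x80 (-128). A minimal caller, hypothetical and not part of this
+// test, mirroring the signed sketch above:
+//
+//   vuint8m1_t cap_at_u(vbool8_t m, vuint8m1_t dst, vuint8m1_t v,
+//                       uint8_t cap, size_t vl) {
+//     return vminu(m, dst, v, cap, vl); // dispatches to vminu_vx_u8m1_m
+//   }
+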
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vminu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c
new file mode 100644
index 000000000000..7e0792efcc67
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnand.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmnand(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c
new file mode 100644
index 000000000000..7eed0c6caed1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmnor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmnor(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c
new file mode 100644
index 000000000000..9883130869a2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmor.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmornot(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c
new file mode 100644
index 000000000000..cc941bffe6d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbc.c
@@ -0,0 +1,2566 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
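+// Note on the naming: the _bN suffix of the vbool result is the SEW/LMUL
+// ratio, so u8mf8 (SEW=8, LMUL=1/8) yields vbool64_t, which lowers to
+// <vscale x 1 x i1> in the checks above; u8mf4 below halves the ratio to
+// vbool32_t / <vscale x 2 x i1>, and so on down to u8m8 / vbool1_t.
+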
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
+ vbool1_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsbc(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
new file mode 100644
index 000000000000..03be5a611345
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
@@ -0,0 +1,197 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { return vmsbf(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsbf(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
new file mode 100644
index 000000000000..c0729e5eb08b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmseq.c
@@ -0,0 +1,2584 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmseq(op1, op2, vl);
+}
+
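+// A minimal usage sketch (illustrative only, not exercised by the checks in
+// this file; assumes <riscv_vector.h> and the contemporaneous non-prefixed
+// spellings vsetvl_e32m8, vle32_v_u32m8 and the overloaded vfirst): the
+// overloaded vmseq picks the vv or vx form from its operand types, so a
+// single call site covers both shapes tested above.
+//
+//   #include <riscv_vector.h>
+//
+//   // Return the index of the first element of a[0..n) equal to key, or n.
+//   size_t find_eq_u32(const uint32_t *a, size_t n, uint32_t key) {
+//     for (size_t i = 0; i < n;) {
+//       size_t vl = vsetvl_e32m8(n - i);           // lanes this iteration
+//       vuint32m8_t v = vle32_v_u32m8(a + i, vl);  // load one strip
+//       vbool4_t eq = vmseq(v, key, vl);           // vx form via overloading
+//       long hit = vfirst(eq, vl);                 // first set lane, or -1
+//       if (hit >= 0)
+//         return i + (size_t)hit;
+//       i += vl;
+//     }
+//     return n;
+//   }
+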
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
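+// The checks above make the masked calling convention visible: the C-level
+// overload is vmseq(mask, maskedoff, op1, op2, vl), while the underlying
+// llvm.riscv.vmseq.mask intrinsic takes (maskedoff, op1, op2, mask, vl);
+// lanes whose mask bit is clear take their value from maskedoff. One common
+// idiom (a sketch; m, v, key and vl are placeholders) passes the same mask
+// register for both arguments, intersecting an existing predicate with the
+// new comparison:
+//
+//   vbool64_t m2 = vmseq(m, m, v, key, vl);  // set only where m set && v == key
+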
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmseq(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
new file mode 100644
index 000000000000..d98664edba9f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsgt.c
@@ -0,0 +1,1287 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsgt(op1, op2, vl);
+}
+
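+// The vmsgtu tests below mirror the signed vmsgt tests for unsigned element
+// types; the overload resolves on the vuint operand types and lowers to the
+// @llvm.riscv.vmsgtu intrinsics instead.
+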
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu(op1, op2, vl);
+}
+
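+// The _m variants below are the masked forms: they take a mask and a
+// maskedoff operand and lower to the @llvm.riscv.vmsgt(u).mask intrinsics,
+// which compute the comparison only for active elements and take the
+// corresponding maskedoff bit for elements where the mask is clear.
+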
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsgt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu(mask, maskedoff, op1, op2, vl);
+}
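
The masked `_m` tests above all exercise the same overloaded shape: vmsgtu(mask, maskedoff, op1, op2, vl), where inactive lanes take their bit from maskedoff. A minimal usage sketch of that shape follows, assuming an RVV-enabled toolchain with the overloaded intrinsics from these tests (the function name is illustrative only, and the unmasked vmsleu overload used to seed the mask is the one exercised later in this commit's vmsle.c):

#include <riscv_vector.h>
#include <stddef.h>

/* Keep lanes with lo < v <= hi: an unmasked compare builds the upper-bound
 * mask, then the masked vmsgtu overload tested above refines it. Inactive
 * lanes (v > hi) inherit their 0 bit from maskedoff. */
vbool8_t in_open_range_u8m1(vuint8m1_t v, uint8_t lo, uint8_t hi, size_t vl) {
  vbool8_t below_hi = vmsleu(v, hi, vl);         /* v <= hi, unmasked */
  return vmsgtu(below_hi, below_hi, v, lo, vl);  /* and v > lo, masked */
}
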
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
new file mode 100644
index 000000000000..0e6b39d63b1b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -0,0 +1,197 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { return vmsif(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsif(mask, maskedoff, op1, vl);
+}
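
The vmsif ("set-including-first") tests above cover every mask width from b1 to b64, unmasked and masked. As a hedged sketch of what the intrinsic computes, the snippet below pairs it with the overloaded vmsle compare tested in the next file: vmsif yields a mask that is 1 for every lane up to and including the first 1 in its input (all 1s through vl if the input has no set bit). The function name is illustrative only.

#include <riscv_vector.h>
#include <stddef.h>

/* Mark the prefix of a vector through the first element that drops to
 * `limit` or below; lanes after that first hit come back as 0. */
vbool8_t prefix_until_drop(vint8m1_t v, int8_t limit, size_t vl) {
  vbool8_t hit = vmsle(v, limit, vl);  /* lanes where v <= limit */
  return vmsif(hit, vl);               /* 1s through the first such lane */
}
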
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
new file mode 100644
index 000000000000..dcfce37e9e43
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsle.c
@@ -0,0 +1,2599 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsle(op1, op2, vl);
+}
+
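+// The vmsleu tests below cover the unsigned comparison; overload resolution
+// selects vmsleu from the vuint operand types, and the result mask type
+// (vboolN_t) follows the SEW/LMUL ratio of the inputs.
+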
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsleu(op1, op2, vl);
+}
+
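+// Masked variants (_m suffix) prepend mask and maskedoff arguments and lower
+// to the corresponding .mask intrinsics, where the merge (maskedoff) value is
+// the first IR operand and the mask is passed after op2.
+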
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsle(mask, maskedoff, op1, op2, vl);
+}
+
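+// The masked tests above exercise the signed comparison vmsle; the tests
+// below repeat the same pattern for the unsigned form vmsleu. A minimal
+// usage sketch of the overloaded masked call (illustrative only, not one of
+// the generated tests; assumes <riscv_vector.h> and a V-enabled target, and
+// the function name keep_le is hypothetical):
+//
+//   vbool8_t keep_le(vbool8_t mask, vbool8_t maskedoff,
+//                    vint16m2_t a, vint16m2_t b, size_t vl) {
+//     // Inactive lanes take their value from maskedoff; active lanes
+//     // receive the result of (a <= b), exactly as in the tests above.
+//     return vmsle(mask, maskedoff, a, b, vl);
+//   }
+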
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
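+// The _vx variants above pass a scalar second operand, which selects the
+// .i8 intrinsic in the emitted IR; the overloaded C call is otherwise
+// unchanged. Sketch under the same assumptions as above (below_limit is a
+// hypothetical name, not a generated test):
+//
+//   vbool1_t below_limit(vbool1_t mask, vbool1_t maskedoff,
+//                        vuint8m8_t v, uint8_t limit, size_t vl) {
+//     // Each active lane of v is compared against the scalar limit.
+//     return vmsleu(mask, maskedoff, v, limit, vl);
+//   }
+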
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsleu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
new file mode 100644
index 000000000000..742d189eec00
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmslt.c
@@ -0,0 +1,2599 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmslt(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsltu(op1, op2, vl);
+}
+
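+// The _m tests below exercise the masked overloads. As the CHECK lines show,
+// the overloaded C form is vmslt(mask, maskedoff, op1, op2, vl), while the
+// underlying llvm.riscv.vmslt.mask intrinsic takes its operands in a
+// different order: (maskedoff, op1, op2, mask, vl). Result elements whose
+// mask bit is clear are taken from maskedoff; active elements hold the
+// signed op1 < op2 comparison result.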
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmslt(mask, maskedoff, op1, op2, vl);
+}
+
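+// Masked variants of the unsigned comparison follow the same structure as
+// the vmslt _m tests above, but vmsltu performs an unsigned op1 < op2
+// compare on vuint/uint operand types.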
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsltu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
new file mode 100644
index 000000000000..12d6e21ba5ca
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsne.c
@@ -0,0 +1,2584 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+                                   size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
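+// Note: the unsigned overloads that follow lower to the same
+// llvm.riscv.vmsne intrinsics as the signed tests above; inequality is
+// sign-agnostic, so only element width and LMUL select the intrinsic.
+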
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+                                   size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+                                   size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+                                   size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsne(op1, op2, vl);
+}
+
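+// The _m variants below pass (mask, maskedoff, op1, op2, vl) to the
+// overloaded vmsne: active lanes (mask bit set) compute op1[i] != op2[i],
+// and inactive lanes keep the corresponding maskedoff bit. A minimal caller
+// sketch (illustrative names only, not part of the generated checks):
+//
+//   vbool32_t lt = vmslt(a, b, vl);          // assumed prior comparison
+//   vbool32_t ne = vmsne(lt, lt, a, c, vl);  // refine only where lt is set
+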
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                    vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                    vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                    vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                    vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                    vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                    vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                 vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                 vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                 vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                 vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
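+
+// Note: for 64-bit elements the vx forms keep an i64 scalar operand in the IR
+// even on RV32 (see the CHECK-RV32 lines above); only the vl operand narrows
+// to i32 on the 32-bit target.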
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
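+
+// Note: the unsigned tests below lower to the same @llvm.riscv.vmsne
+// intrinsics as the signed tests above; vmsne compares for plain inequality,
+// so signedness only distinguishes the C-level vint/vuint operand types.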
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsne(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
new file mode 100644
index 000000000000..f816cbceea4a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
@@ -0,0 +1,197 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { return vmsof(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsof(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
new file mode 100644
index 000000000000..0a370c7a2b89
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
@@ -0,0 +1,6168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
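+// Note: the 64-bit-element _vx tests still take an int64_t scalar, so on RV32
+// the IR below pairs an i64 op2 with an i32 vl operand.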
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmulh(op1, op2, vl);
+}
+
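+// vmulhu: unsigned high-half multiply. The calls below use the overloaded
+// vmulhu(op1, op2, vl) form, resolved from the unsigned argument types; the
+// _vx variants splat the scalar op2 across the vector.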
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(op1, op2, vl);
+}
+
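+// vmulhsu: high-half multiply of a signed op1 by an unsigned op2, yielding a
+// signed result. Coverage mirrors the vmulh/vmulhu tests above.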
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(op1, op2, vl);
+}
+
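+// Note (editorial): the _m variants below exercise the masked overloads.
+// Each test takes a mask and a maskedoff destination ahead of the operands
+// and lowers to the corresponding llvm.riscv.vmul.mask.* intrinsic, with the
+// vl argument typed i32 on RV32 and i64 on RV64 as the CHECK lines show.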
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
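+// NOTE (editorial): across the 16-bit block above, LMUL scales the
+// scalable element count linearly (mf4 -> nxv1i16 up to m8 -> nxv32i16),
+// and the mask type follows vboolN_t with N = SEW/LMUL, e.g. vbool64_t
+// for i16mf4 (16/(1/4) = 64) and vbool2_t for i16m8 (16/8 = 2).
+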
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmulh(mask, maskedoff, op1, op2, vl);
+}
+
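+// NOTE (editorial): vmulh returns the upper half of the widened signed
+// product; vmulhu (tested below) returns the upper half of the unsigned
+// product. At SEW=8 the same bit pattern diverges, e.g. op1 = op2 = 0xff:
+//   signed:   (-1) * (-1) =      1 = 0x0001 -> high byte 0x00
+//   unsigned:  255 *  255 =  65025 = 0xfe01 -> high byte 0xfe
+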
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
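+// NOTE (editorial): the remaining unsigned tests mirror the signed vmulh
+// block one-for-one; only the element types (vuintN/uintN_t) and the
+// intrinsic name (llvm.riscv.vmulhu.*) differ, so the CHECK shapes are
+// otherwise identical.
+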
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c
new file mode 100644
index 000000000000..ca5799c6f6f7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxnor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmxnor(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c
new file mode 100644
index 000000000000..809b4d80cd07
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmxor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
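+
+// Like the other mask-register logical tests, each wrapper below checks that
+// the overloaded vmxor(op1, op2, vl) call lowers to @llvm.riscv.vmxor over
+// the matching nxv*i1 type, with the vl operand carried as i32 or i64
+// depending on XLEN.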
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmxor(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
new file mode 100644
index 000000000000..450bd33a8d99
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
@@ -0,0 +1,1793 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
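+
+// vnclip is the narrowing fixed-point clip: it shifts a 2*SEW-wide source
+// right, rounds (per the current vxrm mode), and saturates into SEW-wide
+// signed lanes, so e.g. vint16mf4_t narrows to vint8mf8_t. The wv tests pass
+// a vector of shift amounts, the wx tests a single size_t. A minimal usage
+// sketch (hypothetical helper, not one of the checked tests):
+//
+//   vint8m1_t narrow_q7(vint16m2_t acc, size_t vl) {
+//     return vnclip(acc, 7, vl); // shift right by 7, saturate to int8
+//   }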
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
+ return vnclip(op1, shift, vl);
+}
+
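+// vnclipu is the unsigned counterpart: it narrows vuint sources through the
+// same wv/wx overload pairs, saturating into the unsigned range of the
+// destination element width via @llvm.riscv.vnclipu.
+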
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
+ size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vnclipu(op1, shift, vl);
+}
+
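+// The _m variants below prepend a mask and a maskedoff operand (the source of
+// inactive-element values) to the overloaded vnclip call, selecting the
+// @llvm.riscv.vnclip.mask intrinsics instead of the unmasked forms.
+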
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint16mf4_t op1, vuint8mf8_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint16mf4_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint16mf2_t op1, vuint8mf4_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint16mf2_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint16m1_t op1, vuint8mf2_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint16m1_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint16m2_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint16m4_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint16m8_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint32mf2_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint32mf2_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint32m1_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint32m1_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint32m2_t op1, vuint16m1_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint32m2_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint32m4_t op1, vuint16m2_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint32m4_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint32m8_t op1, vuint16m4_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint32m8_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint64m1_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint64m1_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint64m2_t op1, vuint32m1_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint64m2_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint64m4_t op1, vuint32m2_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint64m4_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint64m8_t op1, vuint32m4_t shift,
+ size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint64m8_t op1, size_t shift, size_t vl) {
+ return vnclip(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint16mf4_t op1, vuint8mf8_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint16mf2_t op1, vuint8mf4_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint16m1_t op1, vuint8mf2_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint16m1_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint16m2_t op1, vuint8m1_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint16m2_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint16m4_t op1, vuint8m2_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint16m4_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint16m8_t op1, vuint8m4_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint16m8_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint32mf2_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint32mf2_t op1, size_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint32m1_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint32m1_t op1, size_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint32m2_t op1, vuint16m1_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint32m2_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint32m4_t op1, vuint16m2_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint32m4_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint32m8_t op1, vuint16m4_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint32m8_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint64m1_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint64m1_t op1, size_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint64m2_t op1, vuint32m1_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint64m2_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint64m4_t op1, vuint32m2_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint64m4_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint64m8_t op1, vuint32m4_t shift,
+ size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint64m8_t op1, size_t shift, size_t vl) {
+ return vnclipu(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
new file mode 100644
index 000000000000..6771870536b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
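+
+// vnmsac is the integer multiply-sub accumulate: vd[i] = -(op1[i] * op2[i]) + vd[i],
+// i.e. the product of op1 and op2 is subtracted from the accumulator. The vv
+// forms take a vector op1; the vx forms splat a scalar op1 across all elements.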
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
new file mode 100644
index 000000000000..f71f384135d0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
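+// Per the RISC-V V spec, the overloaded vnmsub form computes
+// vd[i] = -(acc[i] * op1[i]) + op2[i]; the tests below only check that
+// each overload lowers to the matching llvm.riscv.vnmsub intrinsic for
+// every element width and LMUL on both RV32 and RV64.
+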
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub(acc, op1, op2, vl);
+}
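+
+// Editorial note (an assumption from the RVV spec, not generated output):
+// the masked overloads below take the mask as the leading argument and
+// lower to the corresponding `.mask.` form of the intrinsic. Inactive
+// elements keep their previous `acc` value, roughly:
+//   for (size_t i = 0; i < vl; ++i)
+//     if (mask[i]) acc[i] = op2[i] - acc[i] * op1[i];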
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
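+// Masked unsigned variants. vnmsub's multiply and subtract are
+// sign-agnostic, so these lower to the same llvm.riscv.vnmsub.mask.*
+// intrinsics as the signed tests above; only the C-level types differ.
+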
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub(mask, acc, op1, op2, vl);
+}
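+
+// Semantics exercised above (per the RVV spec): vnmsub overwrites the
+// accumulator, vd[i] = -(vd[i] * op1[i]) + op2[i]; in the masked overloads,
+// elements whose mask bit is 0 keep the incoming `acc` value.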
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
new file mode 100644
index 000000000000..1000376024a7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
@@ -0,0 +1,848 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
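+
+// These tests cover vnsra, the narrowing arithmetic right shift: each result
+// element is op1[i] (2*SEW bits wide) shifted right by shift[i] (_wv form) or
+// by a scalar shift amount (_wx form), then truncated to SEW bits. The _m
+// variants merge inactive elements from `maskedoff`.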
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
+ return vnsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
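+
+// A minimal usage sketch for the masked overload (hypothetical names and
+// shift amount, not part of the autogenerated checks):
+//   vint8mf8_t r = vnsra(mask, maskedoff, wide_src, 3, vl);
+// Elements with mask bit 1 become (int8_t)(wide_src[i] >> 3); the rest keep
+// maskedoff[i].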
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vnsra(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
new file mode 100644
index 000000000000..9d17cfc47118
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
@@ -0,0 +1,848 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
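+// Note (explanatory aside, not an autogenerated check): vnsrl is the RVV
+// narrowing logical right shift. Each test shifts a 2*SEW-wide source (op1)
+// right and truncates the result to SEW, conceptually
+//   result[i] = (narrow_uint)(op1[i] >> shift[i]);
+// The _wv variants take a vector shift amount, the _wx variants a scalar
+// (size_t) one; the overloaded vnsrl(...) call resolves on argument types.
+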
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vnsrl(op1, shift, vl);
+}
+
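+// Note (explanatory aside, not an autogenerated check): the _m tests below
+// exercise the masked overloads, vnsrl(mask, maskedoff, op1, shift, vl).
+// Conceptually, elements with the mask bit set receive the narrowing-shift
+// result, while inactive elements keep the corresponding maskedoff values.
+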
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vnsrl(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
new file mode 100644
index 000000000000..168a6f9ee88e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
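+
+// A sketch of the naming scheme exercised below: the overloaded vor() call
+// resolves to a _vv_ form (vector-vector) or a _vx_ form (vector-scalar)
+// from its operand types, lowering to the @llvm.riscv.vor.* intrinsics whose
+// trailing .i32/.i64 suffix reflects the XLEN type of the vl operand on
+// riscv32 vs. riscv64.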
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor(op1, op2, vl);
+}
+
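+// The _m tests below cover the masked overloads: called with a mask and a
+// maskedoff operand, vor(mask, maskedoff, op1, op2, vl) resolves to the
+// llvm.riscv.vor.mask.* intrinsics, where elements whose mask bit is clear
+// are taken from maskedoff.
+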
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor(mask, maskedoff, op1, op2, vl);
+}
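[Editor's note] Every masked vor test above reduces to one calling convention. As a minimal hedged sketch (editorial, not part of the commit; the helper name and the 0xF constant are illustrative), user code built on the overloaded vor() exercised here would look like:

#include <riscv_vector.h>

// Masked vector-scalar OR: for lanes where m is set, the result is v | 0xF;
// inactive lanes appear to be taken from `off`. The operand order matches the
// (mask, maskedoff, op1, op2, vl) pattern used by test_vor_vx_u32m1_m above.
static vuint32m1_t or_low_bits_masked(vbool32_t m, vuint32m1_t off,
                                      vuint32m1_t v, size_t vl) {
  return vor(m, off, v, (uint32_t)0xF, vl);
}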
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c
new file mode 100644
index 000000000000..52f58beab4c3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vpopc.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
+ return vpopc(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vpopc(mask, op1, vl);
+}
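[Editor's note] Likewise, a hedged usage sketch for the overloaded vpopc() these tests cover (the helper name is invented): the unmasked form takes (op1, vl), the masked form prepends a mask, and both return the count as unsigned long on RV32 and RV64 alike.

#include <riscv_vector.h>

// Count set mask bits among the first vl elements; the masked form counts
// only lanes where m is also set, mirroring test_vpopc_m_b32{,_m} above.
static unsigned long count_inactive_hits(vbool32_t m, vbool32_t bits,
                                         size_t vl) {
  unsigned long total = vpopc(bits, vl);    // unmasked form: (op1, vl)
  unsigned long hit   = vpopc(m, bits, vl); // masked form: (mask, op1, vl)
  return total - hit;                       // lanes set in bits but not in m
}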
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
new file mode 100644
index 000000000000..17b893e694b9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
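+// The unsigned variants follow. They lower to the same llvm.riscv.vredand
+// intrinsics as the signed ones; only the C-level types differ.
+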
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand(dst, vector, scalar, vl);
+}
+
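+// Masked variants: the vboolN_t mask becomes the leading C argument, and the
+// generated calls switch to the llvm.riscv.vredand.mask.* intrinsics, which
+// take the mask after the scalar operand.
+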
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
new file mode 100644
index 000000000000..e6b215d5c31f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
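+// A sketch of the reduction semantics these checks assume: the overloaded
+// call vredmax(dst, vector, scalar, vl) computes
+//   result[0] = max(scalar[0], vector[0], ..., vector[vl-1])
+// and the remaining elements of the returned m1 value come from dst.
+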
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmax(dst, vector, scalar, vl);
+}
+
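+// vredmaxu is the unsigned counterpart: same operand shape, but the maximum
+// is taken with unsigned comparison, which is why the unsigned u* types below
+// dispatch to it.
+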
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
new file mode 100644
index 000000000000..cefff41a13b6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
new file mode 100644
index 000000000000..034b7ef3eb4c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector, vuint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector, vuint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector, vuint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector, vuint16m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector, vuint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector, vuint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector, vuint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector, vuint32m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector, vuint64m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector, vuint64m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector, vuint64m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector, vuint64m1_t scalar,
+ size_t vl) {
+ return vredor(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
new file mode 100644
index 000000000000..6fdadc8c78f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
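+// The unsigned cases follow. As the CHECK lines show, they lower to the
+// same @llvm.riscv.vredsum.* intrinsics as the signed cases above; an
+// integer sum reduction is sign-agnostic at the IR level.
+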
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum(dst, vector, scalar, vl);
+}
+
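+// The tests below cover the masked overloads. The mask is passed as the
+// first argument, its vbool type matches the SEW/LMUL ratio of the source
+// vector (e.g. vbool64_t for vint8mf8_t), and the call lowers to the
+// corresponding @llvm.riscv.vredsum.mask.* intrinsic. A minimal sketch,
+// assuming the operands are declared as in the first masked test below:
+//
+//   vint8m1_t r = vredsum(mask, dst, vector, scalar, vl);
+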
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
new file mode 100644
index 000000000000..3c9729ab3994
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
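+
+// Illustrative note (not part of the autogenerated checks): the unmasked
+// overloads below take (dst, vector, scalar, vl). Per the RVV spec, only
+// element 0 of `scalar` (the accumulation seed) and element 0 of the result
+// (the XOR of the seed with the first vl elements of `vector`) are
+// significant; `dst` serves as the merge operand for the remaining elements.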
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
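+
+// For instance, in the test above with vl = 4, vector = {1, 2, 3, 4} and
+// scalar[0] = 0x10, element 0 of the result would be
+// 0x10 ^ 1 ^ 2 ^ 3 ^ 4 = 0x14 (illustrative arithmetic only; the test
+// checks the shape of the IR call, not runtime values).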
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor(dst, vector, scalar, vl);
+}
+
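+// Explanatory comment (not autogenerated): the masked overloads from here on
+// prepend the mask operand, i.e. vredxor(mask, dst, vector, scalar, vl).
+// Only elements of `vector` whose mask bit is set are expected to
+// participate in the reduction, lowering to the llvm.riscv.vredxor.mask.*
+// intrinsics checked below.
+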
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
new file mode 100644
index 000000000000..ff6e320a7574
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
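+
+// vrem computes the element-wise signed remainder. The overloaded vrem()
+// wrapper resolves on operand types: the vv forms below take two vectors,
+// the vx forms take a vector and an int scalar, and each call should lower
+// to the matching @llvm.riscv.vrem.* intrinsic for the element type and
+// LMUL, with vl passed as i32 on riscv32 and i64 on riscv64.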
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem(mask, maskedoff, op1, op2, vl);
+}
+
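+// The vrem tests above all go through the single generic (overloaded) entry
+// point: overload resolution picks the _vv builtin when op2 is a vector and
+// the _vx builtin when op2 is a scalar, and the (mask, maskedoff, ..., vl)
+// prototype selects the masked form, where masked-off lanes are taken from
+// maskedoff. A minimal illustrative sketch (demo_vrem is not part of this
+// test, just an example caller):
+//
+//   vint32m1_t demo_vrem(vbool32_t m, vint32m1_t off,
+//                        vint32m1_t a, int32_t b, size_t vl) {
+//     return vrem(m, off, a, b, vl); // resolves to vrem_vx_i32m1_m
+//   }
+//
+// The vremu tests below follow the same pattern for the unsigned remainder.
+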
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
new file mode 100644
index 000000000000..1e68480efc71
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
@@ -0,0 +1,4785 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
+ return vrgather(op1, index, vl);
+}
+
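+// vrgatherei16 differs from vrgather only in the index operand: the
+// indices always have 16-bit elements regardless of the data SEW, so
+// the index register group is scaled separately (e.g. vint8mf2_t data
+// pairs with a vuint16m1_t index below). A minimal sketch, with a
+// hypothetical helper name:
+static inline vint8m1_t gather16_i8m1(vint8m1_t v, vuint16m2_t idx,
+                                      size_t vl) {
+  // Selects v[idx[i]] for each i < vl; the 16-bit indices can address
+  // elements beyond the 0..255 range an 8-bit index vector allows.
+  return vrgatherei16(v, idx, vl);
+}
+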
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
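+// Note (illustrative): only the index operand must be an unsigned 16-bit
+// integer vector; the gathered data may be floating-point, as the f32/f64
+// cases below exercise.
+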
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(op1, op2, vl);
+}
+
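+// Note (illustrative comment, not produced by the intrinsic test generator):
+// The _m overloads below take a leading mask and maskedoff operand; where a
+// mask bit is clear, the result lane comes from maskedoff rather than from
+// the gather. The vv form indexes with a vector, the vx form broadcasts a
+// single scalar index. A hypothetical sketch using the types tested below:
+//
+//   vint8mf8_t masked_gather(vbool64_t m, vint8mf8_t off, vint8mf8_t v,
+//                            vuint8mf8_t idx, size_t vl) {
+//     return vrgather(m, off, v, idx, vl);
+//   }
+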
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vuint8mf8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vuint8mf4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vuint8mf2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vuint8m1_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vuint8m2_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vuint8m4_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vuint8m8_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vuint16mf4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vuint16mf2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vuint16m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vuint16m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vuint16m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vuint16m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, size_t index, size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
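+// Added note (a sketch, not part of the autogenerated checks): the
+// floating-point tests below exercise the same overloads; only the element
+// type of op1 and maskedoff changes (e.g. nxv1f32), while index remains an
+// unsigned integer vector of the same element width and LMUL, as the
+// llvm.riscv.vrgather.vv.mask.nxv*f* intrinsic signatures show.
+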
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, size_t index,
+ size_t vl) {
+ return vrgather(mask, maskedoff, op1, index, vl);
+}
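+
+// A note on the masked overloads exercised here: in the overloaded API the
+// mask/maskedoff form is selected purely by the argument list, so
+// vrgather(mask, maskedoff, op1, index, vl) resolves to the matching
+// @llvm.riscv.vrgather.{vv,vx}.mask intrinsic. Active lanes receive
+// op1[index[i]] (or 0 when index[i] >= VLMAX); inactive lanes keep the
+// maskedoff value. A minimal illustrative sketch, not part of the generated
+// checks (vid_v_u32m1 and the overloaded vrsub are assumed available from
+// <riscv_vector.h>):
+//
+//   vfloat32m1_t reverse_active(vbool32_t m, vfloat32m1_t off,
+//                               vfloat32m1_t v, size_t vl) {
+//     vuint32m1_t i = vid_v_u32m1(vl);        // 0, 1, ..., vl-1
+//     vuint32m1_t idx = vrsub(i, vl - 1, vl); // vl-1, ..., 1, 0
+//     return vrgather(m, off, v, idx, vl);    // gather v in reverse order
+//   }
+//
+// The vrgatherei16 tests that follow fix the index element width at 16 bits
+// (EEW=16) regardless of the data SEW, which is why i8m4 data pairs with a
+// vuint16m8_t index and why no i8m8 variant exists (its index would need
+// LMUL=16).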
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask,
+ vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask,
+ vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask,
+ vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
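+
+// The EEW=16 index convention carries over unchanged to the floating-point
+// element types below: the index operand is always an unsigned 16-bit vector
+// whose LMUL is scaled so the element counts line up. A minimal illustrative
+// sketch, not part of the generated checks, mirroring the f64m1 case where
+// nxv1f64 data pairs with an nxv1i16 index:
+//
+//   vfloat64m1_t pick(vbool64_t m, vfloat64m1_t off, vfloat64m1_t v,
+//                     vuint16mf4_t idx, size_t vl) {
+//     return vrgatherei16(m, off, v, idx, vl);
+//   }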
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask,
+ vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask,
+ vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask,
+ vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask,
+ vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask,
+ vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
new file mode 100644
index 000000000000..486232ce6ff4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
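+
+// Roughly, the vrsub.vx form computes result[i] = op2 - op1[i] (scalar minus
+// vector, i.e. vsub with the operands reversed), which is why only
+// vector-scalar overloads are tested here: a vector-vector vrsub would be
+// redundant with vsub.vv.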
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
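+// The unsigned overloads below lower to the same llvm.riscv.vrsub intrinsics
+// as the signed ones: subtraction is sign-agnostic at the IR level, so only
+// the element width (nxv*i8 through nxv*i64) distinguishes the CHECK lines.
+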
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vrsub(op1, op2, vl);
+}
+
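+// The masked variants follow. The overloaded call shape is roughly
+// vrsub(mask, maskedoff, op1, op2, vl); lanes whose mask bit is 0 are taken
+// from maskedoff, and no separate tail/mask policy argument appears in this
+// revision of the API.
+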
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vrsub(mask, maskedoff, op1, op2, vl);
+}
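
The masked tests above all follow one pattern: the overloaded `vrsub` (vector-scalar reverse subtract, vd[i] = rs1 - vs2[i]) dispatches on argument types, and the mask/maskedoff pair selects the merge value for inactive lanes. A minimal usage sketch, assuming only the overloaded `riscv_vector.h` API exercised by these tests (the constant 100 and the helper name are illustrative, not part of the test file):

#include <riscv_vector.h>

// Compute 100 - a[i] on active lanes; inactive lanes keep b[i].
// A sketch only; vbool8_t is the mask type paired with vint8m1_t,
// matching the masked overload used in the tests above:
// vrsub(mask, maskedoff, op1, op2, vl).
vint8m1_t reverse_sub_from_100(vbool8_t mask, vint8m1_t b, vint8m1_t a,
                               size_t vl) {
  return vrsub(mask, b, a, 100, vl);
}
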
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
new file mode 100644
index 000000000000..2e1d8e8a148e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsadd(op1, op2, vl);
+}
+
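
From here the file switches to the unsigned variant, `vsaddu`. Both intrinsics saturate rather than wrap: `vsadd` clamps the per-element sum to the signed range of the element type, `vsaddu` to the unsigned range. A scalar model of the lane-wise behavior these stanzas exercise, as a hedged sketch (the helper names are illustrative only):

#include <stdint.h>

// Per-element model of vsadd on int8 lanes: clamp instead of wrapping.
static int8_t sadd_i8(int8_t a, int8_t b) {
  int16_t s = (int16_t)a + (int16_t)b; // widen so the sum cannot overflow
  if (s > INT8_MAX) return INT8_MAX;   // positive saturation
  if (s < INT8_MIN) return INT8_MIN;   // negative saturation
  return (int8_t)s;
}

// Per-element model of vsaddu on uint8 lanes: clamp to [0, UINT8_MAX].
static uint8_t saddu_u8(uint8_t a, uint8_t b) {
  uint16_t s = (uint16_t)a + (uint16_t)b;
  return s > UINT8_MAX ? UINT8_MAX : (uint8_t)s;
}
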
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsaddu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsadd(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsaddu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
new file mode 100644
index 000000000000..e9f3b6844a25
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsbc.c
@@ -0,0 +1,1328 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
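+// Unsigned variants follow. vsbc is sign-agnostic, so these overloads lower
+// to the same llvm.riscv.vsbc intrinsics as the signed tests above.
+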
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc(op1, op2, borrowin, vl);
+}
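
Context note (not part of the patch): the overloaded vsbc intrinsic exercised above computes op1 - op2 - borrowin across the active elements, with the borrow mask typically produced by a preceding vmsbc. A minimal usage sketch follows, assuming an RVV-enabled toolchain with <riscv_vector.h> and the v0.10-era intrinsic names; the helper sub_u64_as_u32_pairs and its signature are hypothetical, while the overloaded vsbc/vse32 calls match the forms tested in this file.

// Hypothetical sketch: 64-bit subtraction carried out on split 32-bit
// low/high word arrays, chaining the borrow from the low words into
// the high words via vmsbc (borrow out) and vsbc (borrow in).
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void sub_u64_as_u32_pairs(uint32_t *lo_d, uint32_t *hi_d,
                          const uint32_t *lo_a, const uint32_t *hi_a,
                          const uint32_t *lo_b, const uint32_t *hi_b,
                          size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);
    vuint32m1_t a_lo = vle32_v_u32m1(lo_a + i, vl);
    vuint32m1_t b_lo = vle32_v_u32m1(lo_b + i, vl);
    // Borrow produced by the low-word subtraction.
    vbool32_t borrow = vmsbc_vv_u32m1_b32(a_lo, b_lo, vl);
    vse32(lo_d + i, vsub_vv_u32m1(a_lo, b_lo, vl), vl);
    vuint32m1_t a_hi = vle32_v_u32m1(hi_a + i, vl);
    vuint32m1_t b_hi = vle32_v_u32m1(hi_b + i, vl);
    // Borrow consumed by the high-word subtraction; this is the
    // overloaded vsbc(op1, op2, borrowin, vl) form tested above.
    vse32(hi_d + i, vsbc(a_hi, b_hi, borrow, vl), vl);
    i += vl;
  }
}
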
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
new file mode 100644
index 000000000000..b957f296e175
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c
@@ -0,0 +1,1819 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11:#.*]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11:#.*]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) {
+ return vse8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) {
+ return vse16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16f32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) {
+ return vse32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) {
+ return vse64(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) {
+ return vse8(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) {
+ return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) {
+ return vse32(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
+ return vse64(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv64i1.i32(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv64i1.i64(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
+ return vse1(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv32i1.i32(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv32i1.i64(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
+ return vse1(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv16i1.i32(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv16i1.i64(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
+ return vse1(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv8i1.i32(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv8i1.i64(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
+ return vse1(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv4i1.i32(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv4i1.i64(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
+ return vse1(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv2i1.i32(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv2i1.i64(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
+ return vse1(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv1i1.i32(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i32 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv1i1.i64(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]]) [[ATTR11]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
+ return vse1(base, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
new file mode 100644
index 000000000000..026fdbdf5e40
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
@@ -0,0 +1,820 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
+ return vsext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
+ return vsext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
+ return vsext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
+ return vsext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
+ return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
+ return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint8mf8_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint8mf4_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint8mf2_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint8m1_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint8m2_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint8mf8_t op1, size_t vl) {
+ return vsext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint8mf4_t op1, size_t vl) {
+ return vsext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint8mf2_t op1, size_t vl) {
+ return vsext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint8m1_t op1, size_t vl) {
+ return vsext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint16mf4_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint16mf2_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint16m1_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint16m2_t op1, size_t vl) {
+ return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, size_t vl) {
+ return vsext_vf2(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
new file mode 100644
index 000000000000..744a45f08af6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
@@ -0,0 +1,1348 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i32(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i32(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i32(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i32(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i32(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i32(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i32(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i32(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i32(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i32(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i32(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i32(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i32(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i32(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i32(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i32(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t src, int8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t src, int8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t src, int8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t src, int8_t value, size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t src, int8_t value, size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t src, int8_t value, size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t src, int8_t value, size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t src, int16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t src, int16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t src, int16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t src, int16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t src, int16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t src, int16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t src, int32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t src, int32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t src, int32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t src, int32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t src, int32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t src, int64_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t src, int64_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t src, int64_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t src, int64_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask,
+ vuint16mf4_t maskedoff,
+ vuint16mf4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask,
+ vuint16mf2_t maskedoff,
+ vuint16mf2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask,
+ vuint32mf2_t maskedoff,
+ vuint32mf2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down(mask, maskedoff, src, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
new file mode 100644
index 000000000000..93c6ec673dfd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
@@ -0,0 +1,1323 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i32(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i32(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i32(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i32(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i32(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i32(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i32(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i32(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i32(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i32(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i32(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i32(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i32(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i32(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i32(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i32(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t src, int8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t src, int8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t src, int8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t src, int8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t src, int8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t src, int8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t src, int8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t src, int16_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t src, int16_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t src, int16_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t src, int16_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t src, int16_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t src, int16_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t src, int32_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t src, int32_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t src, int32_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t src, int32_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t src, int32_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t src, int64_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t src, int64_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t src, int64_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t src, int64_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t src, uint8_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t src, uint8_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t src, uint8_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t src, uint8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t src, uint8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t src, uint8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t src, uint8_t value, size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up(mask, maskedoff, src, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
new file mode 100644
index 000000000000..6cb1239c64c1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
@@ -0,0 +1,1646 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i32(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i32(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i32(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i32(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i32(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i32(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i32(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
+ vint8mf8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
+ vint8mf4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
+ vint8mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
+ vint16mf4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
+ vint16mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
+ vint16m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
+ vint16m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
+ vint16m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
+ vint32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
+ vint32m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
+ vint32m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
+ vint32m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
+ vint64m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
+ vint64m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
+ vint64m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
+ vuint8mf8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
+ vuint8mf4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
+ vuint8mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
+ vuint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
+ vuint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
+ vuint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
+ vuint16mf4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
+ vuint16mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
+ vuint16m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
+ vuint16m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
+ vuint16m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
+ vuint32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
+ vuint32m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
+ vuint32m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
+ vuint32m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
+ vuint64m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
+ vuint64m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
+ vuint64m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i32(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
+ vfloat32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i32(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
+ vfloat32m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i32(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
+ vfloat32m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i32(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
+ vfloat32m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i32(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
+ vfloat64m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i32(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
+ vfloat64m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i32(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
+ vfloat64m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown(mask, dst, src, offset, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
new file mode 100644
index 000000000000..907cb04407d7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
@@ -0,0 +1,1631 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i32(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i32(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i32(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i32(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i32(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i32(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i32(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
+ vint8mf8_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
+ vint8mf4_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
+ vint8mf2_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
+ vint16mf4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
+ vint16mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
+ vint16m2_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
+ vint16m4_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
+ vint16m8_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
+ vint32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
+ vint32m2_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
+ vint32m4_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
+ vint32m8_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
+ vint64m2_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
+ vint64m4_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
+ vint64m8_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
+ vuint8mf8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
+ vuint8mf4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
+ vuint8mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
+ vuint8m2_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
+ vuint8m4_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
+ vuint8m8_t src, size_t offset, size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
+ vuint16mf4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
+ vuint16mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
+ vuint16m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
+ vuint16m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
+ vuint16m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
+ vuint32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
+ vuint32m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
+ vuint32m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
+ vuint32m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
+ vuint64m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
+ vuint64m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
+ vuint64m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i32(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
+ vfloat32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i32(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
+ vfloat32m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i32(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
+ vfloat32m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i32(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
+ vfloat32m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i32(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
+ vfloat64m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i32(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
+ vfloat64m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i32(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
+ vfloat64m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup(mask, dst, src, offset, vl);
+}
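+
+// Taken together, the masked overloads above share one call shape:
+// vslideup(mask, dst, src, offset, vl). dst doubles as the merge operand,
+// so elements whose mask bit is clear are left as in dst, and offset/vl
+// lower to XLEN-sized scalars (i32 on RV32, i64 on RV64), as the
+// llvm.riscv.vslideup.mask.* CHECK lines show.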
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
new file mode 100644
index 000000000000..e3fde37a24d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
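+
+// The overloaded vsll form exercised below resolves on its second operand:
+// a vector shift amount selects the vv variant and a scalar size_t selects
+// vx. A caller could therefore write, e.g. (hypothetical sketch, not part
+// of the autogenerated checks):
+//   vint8m1_t shl1(vint8m1_t v, size_t vl) { return vsll(v, (size_t)1, vl); }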
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsll(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsll(mask, maskedoff, op1, shift, vl);
+}
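+
+// Note: vsll is a plain logical left shift. Per the draft V spec these
+// tests target, only the low log2(SEW) bits of the shift operand are
+// used, so the _vx forms taking a size_t shift are well defined even when
+// shift >= SEW (for u16 elements, a shift of 17 acts as a shift of 1).
+// In the _m overloads, inactive elements are taken from maskedoff.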
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
new file mode 100644
index 000000000000..ca0e02e5ed53
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c
@@ -0,0 +1,1287 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
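+// Note: vsmul is the signed saturating fixed-point multiply. Per the
+// draft V spec these tests target, each active element computes roughly
+// clip(roundoff_signed(op1[i] * op2[i], SEW-1)): the double-width product
+// is shifted right by SEW-1 under the current rounding mode (vxrm) and
+// saturated to SEW bits, setting vxsat on overflow. A minimal scalar
+// sketch for SEW=8, assuming round-to-nearest-up (rnu) rounding and an
+// arithmetic right shift:
+//
+//   int8_t ref_vsmul_i8(int8_t a, int8_t b) {
+//     int16_t prod = (int16_t)a * (int16_t)b;                 // double-width product
+//     int16_t r = (int16_t)((prod >> 7) + ((prod >> 6) & 1)); // >> (SEW-1), rnu
+//     if (r > INT8_MAX) return INT8_MAX;  // only -128 * -128 saturates
+//     return (int8_t)r;
+//   }
+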
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsmul(op1, op2, vl);
+}
+
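+// Note: the _m overloads below resolve against the mask-first signature
+// (mask, maskedoff, op1, op2, vl); active elements (mask bit set) are
+// computed as above, while inactive elements are taken from maskedoff.
+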
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
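+
+// Even on RV32 the vx forms pass the 64-bit scalar whole: the CHECK-RV32
+// lines above expect an i64 operand to @llvm.riscv.vsmul.mask.nxv1i64.i64.i32,
+// so splitting the scalar across GPRs is left to the backend.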
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsmul(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c
new file mode 100644
index 000000000000..08c94ac686ed
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c
@@ -0,0 +1,6523 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
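+
+// vsoxei{8,16,32,64} are indexed-ordered scatter stores: element i of
+// `value` is written to `base` plus the byte offset `bindex[i]`, and the
+// stores are performed in element order. The overloaded form infers the
+// data and index types from the arguments, so the name suffix only encodes
+// the index element width.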
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
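+
+// With 16-bit indices the index vector needs twice the LMUL of the 8-bit
+// data, so the widest legal pairing below is vint8m4_t data with
+// vuint16m8_t indices; the 32- and 64-bit index groups stop at m2 and m1
+// for the same reason.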
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
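+
+// The same four index widths now repeat for 16-bit element data; note that
+// the pointer bitcast in the checks switches to <vscale x N x i16>*.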
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex,
+                            vint16mf4_t value, size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex,
+                            vint16mf2_t value, size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex,
+                            vint16mf4_t value, size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex,
+                            vint16mf2_t value, size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex,
+                            vint16mf4_t value, size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex,
+                            vint16mf2_t value, size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
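One structural point worth noting before the i32 tests: for 16-bit data the vsoxei64 variants above stop at m2, while vsoxei8 reaches m8. The index vector must hold one offset per data element, so LMUL_index = LMUL_data * (EEW_index / EEW_data); with e16 data and e64 offsets that is 2 * (64/16) = 8, already the largest register group, so no i16m4 or i16m8 forms can exist for vsoxei64.
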
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
+                           size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
+                            vint32mf2_t value, size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
+                            vint32mf2_t value, size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
+                            vint32mf2_t value, size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
+                           size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
+                           size_t vl) {
+  return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
+                           size_t vl) {
+  return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
+                          size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
+                         size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
+                         size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
+                         size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
+                         size_t vl) {
+  return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
+                           vuint8mf8_t value, size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
+                           vuint8mf4_t value, size_t vl) {
+  return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
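From this point the stores switch from integer to floating-point element types: the value operand becomes a vfloat32/vfloat64 vector while the byte-offset index stays an unsigned integer vector whose LMUL is scaled by the ratio of index EEW to data EEW. A minimal caller sketch in C (assuming <riscv_vector.h> and hypothetical caller-provided base/idx/val; this is not part of the generated test file):

#include <riscv_vector.h>

// Sketch: ordered indexed store of vl f32 elements. The data operand is
// vfloat32m2_t (LMUL 2, EEW 32); with an 8-bit index the index vector
// needs LMUL 2 * 8/32 = 1/2, i.e. vuint8mf2_t, matching the tests below.
void store_f32_indexed(float *base, vuint8mf2_t idx, vfloat32m2_t val,
                       size_t vl) {
  vsoxei8(base, idx, val, vl); // byte offsets taken from idx
}
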
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsoxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsoxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsoxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsoxei64(base, bindex, value, vl);
+}
+
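The remaining tests exercise the masked overloads: the _m variants pass a vbool*_t mask as the first argument to the same overloaded name, and only mask-enabled elements are stored. A minimal sketch of both call forms (again assuming <riscv_vector.h>; the function and variable names are hypothetical):

#include <riscv_vector.h>

// Sketch: the same ordered indexed store, unmasked and then masked.
// For i8m1 data with an 8-bit index, the index is vuint8m1_t and the
// mask type is vbool8_t (SEW/LMUL = 8/1).
void store_i8_both(int8_t *base, vuint8m1_t idx, vint8m1_t val,
                   vbool8_t mask, size_t vl) {
  vsoxei8(base, idx, val, vl);       // all vl elements stored
  vsoxei8(mask, base, idx, val, vl); // only active lanes stored
}
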
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex,
+ vint8m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex,
+ vint8m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
+ vint8m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
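+// The index operand scales with the index EEW: its register group is the data
+// LMUL times (index EEW / data SEW). For i8 data that makes ei32 indices 4x
+// and ei64 indices 8x the data size, which is why the ei32 block above stops
+// at i8m2 (vuint32m8_t) and the ei64 block below stops at i8m1 (vuint64m8_t),
+// the architectural LMUL=8 ceiling for the index group.
+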
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
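+// In the i16 cases that follow, the mask type tracks SEW/LMUL as elsewhere in
+// this file: vint16mf4_t pairs with vbool64_t (16 / (1/4) = 64) and vint16m8_t
+// with vbool2_t.
+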
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
+ vint16m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
+ vuint16mf4_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
+ vuint16mf2_t bindex, vint16mf2_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
+ vint16m8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
+ vuint32mf2_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
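+// Roughly, each masked store below behaves like this scalar model (a sketch,
+// not part of the generated checks; bindex holds unsigned byte offsets and
+// inactive elements leave memory untouched):
+//
+//   for (size_t i = 0; i < vl; ++i)
+//     if (mask[i])
+//       *(int32_t *)((char *)base + bindex[i]) = value[i];
+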
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint16mf4_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint32mf2_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
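+// SEW=64 admits no fractional LMUL, so the i64 tests run m1..m8 only; with
+// ei8 indices the index group shrinks to one eighth of the data (vuint8mf8_t
+// for vint64m1_t up to vuint8m1_t for vint64m8_t).
+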
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
+ vuint8m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
+ vuint8m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
+ vuint8m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint16mf4_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint16mf2_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint32mf2_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint32m1_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint64m1_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint64m2_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint16mf4_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
+ vuint16mf2_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint32mf2_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint64m1_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint16mf4_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
+ vuint16mf2_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint32mf2_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsoxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
+                             vfloat64m2_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
+                             vfloat64m4_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
+                             vfloat64m8_t value, size_t vl) {
+ return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
+                             vfloat64m1_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
+                             vfloat64m2_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
+                             vfloat64m4_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex,
+                             vfloat64m8_t value, size_t vl) {
+ return vsoxei64(mask, base, bindex, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
new file mode 100644
index 000000000000..9b8a93ee26b5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
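+// These tests exercise the type-overloaded spelling of the intrinsics: every
+// call site uses the generic name vsra(...), which resolves to the vv
+// (vector shift) or vx (scalar shift) form and to the matching LMUL variant
+// from its operand types, rather than an explicitly suffixed name such as
+// vsra_vv_i8mf8.
+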
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
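+// In the vx form the shift amount is a size_t scalar; the CHECK lines show it
+// lowered as the XLEN integer type (i32 on riscv32, i64 on riscv64), matching
+// the .i32.i32 / .i64.i64 suffixes of the mangled intrinsic names.
+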
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vsra(op1, shift, vl);
+}
+
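+// The _m tests below cover the masked overloads: a vbool<n>_t mask and a
+// maskedoff vector are prepended to the argument list, and codegen selects
+// the llvm.riscv.vsra.mask.* intrinsics in place of the unmasked ones.
+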
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vsra(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
new file mode 100644
index 000000000000..46ad59cd5b83
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsrl(op1, shift, vl);
+}
+
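+// Masked variants (suffix _m): the overloaded vsrl also accepts a leading
+// vbool mask and a maskedoff operand; inactive elements take their value from
+// maskedoff, and the call lowers to the llvm.riscv.vsrl.mask.* intrinsics
+// checked below.
+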
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsrl(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c
new file mode 100644
index 000000000000..b787820e6391
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c
@@ -0,0 +1,1813 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
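+
+// vsse{8,16,32,64} are strided stores: for vl active elements, element i of
+// value is written to base + i * bstride, where bstride is a byte stride. The
+// overloaded vsse* form infers the element width from the vector operand.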
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value,
+ size_t vl) {
+ return vsse8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value,
+ size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1f32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2f32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4f32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8f32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16f32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value,
+ size_t vl) {
+ return vsse32(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1f64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2f64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4f64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8f64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value,
+ size_t vl) {
+ return vsse64(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8mf8_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8mf4_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8mf2_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8m1_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8m2_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8m4_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8m8_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16mf4_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16mf2_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16m1_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16m2_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16m4_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16m8_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32mf2_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32m1_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32m2_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32m4_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32m8_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
+ vint64m1_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride,
+ vint64m2_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride,
+ vint64m4_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride,
+ vint64m8_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8mf8_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8mf4_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8mf2_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8m1_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8m2_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8m4_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8m8_t value, size_t vl) {
+ return vsse8(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16mf4_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16mf2_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16m1_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16m2_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16m4_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16m8_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32mf2_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32m1_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32m2_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32m4_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32m8_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride,
+ vuint64m1_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride,
+ vuint64m2_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride,
+ vuint64m4_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride,
+ vuint64m8_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1f32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
+ vfloat32mf2_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2f32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride,
+ vfloat32m1_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4f32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride,
+ vfloat32m2_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8f32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride,
+ vfloat32m4_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16f32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride,
+ vfloat32m8_t value, size_t vl) {
+ return vsse32(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1f64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
+ vfloat64m1_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2f64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride,
+ vfloat64m2_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4f64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride,
+ vfloat64m4_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride,
+ vfloat64m8_t value, size_t vl) {
+ return vsse64(mask, base, bstride, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
new file mode 100644
index 000000000000..c5031598d25e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
@@ -0,0 +1,1290 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
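+
+// vssra is the RVV fixed-point "scaling shift right arithmetic" operation:
+// roughly op1 >> shift on signed elements, with the discarded bits rounded
+// according to the dynamic vxrm rounding mode. The _vv variants take a
+// per-element unsigned shift vector and the _vx variants a single scalar
+// shift (only the low log2(SEW) bits of each shift amount are used). These
+// tests exercise the overloaded vssra() form, which dispatches on the
+// argument types.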
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vssra(op1, shift, vl);
+}
+
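+// The _m overloads below exercise the masked form. The mask and maskedoff
+// operands come first; per the RVV intrinsics convention used here, elements
+// where mask[i] is 0 are taken from maskedoff rather than computed, which is
+// why the masked intrinsics in the CHECK lines carry the extra maskedoff
+// vector operand.
+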
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, size_t shift, size_t vl) {
+ return vssra(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
new file mode 100644
index 000000000000..35cec9517b2a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
@@ -0,0 +1,1305 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
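+
+// vssrl is the fixed-point scaling logical right shift: roughly
+// result[i] = roundoff_unsigned(op1[i], shift[i]), with the rounding mode
+// taken from the vxrm CSR (a spec-level summary; these CHECK lines only
+// verify the IR lowering, not the rounding behavior). Each test calls the
+// overloaded vssrl() form and checks the IR for both riscv32 and riscv64.
+//
+// A minimal usage sketch (illustrative only, mirroring the tests below):
+//
+//   vuint8m1_t scale_down(vuint8m1_t v, size_t vl) {
+//     return vssrl(v, 4, vl); // divide by 16 with vxrm-controlled rounding
+//   }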
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
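+// For the u64 variants the scalar shift remains a size_t, so on RV32 it is
+// passed to the intrinsic as i32 even though the elements are i64.
+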
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vssrl(op1, shift, vl);
+}
+
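+// Masked variants: adding a vbool mask and a maskedoff vector to the call
+// selects the _m overloads, which lower to the @llvm.riscv.vssrl.mask.*
+// intrinsics checked below.
+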
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t shift,
+ size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, size_t shift, size_t vl) {
+ return vssrl(mask, maskedoff, op1, shift, vl);
+}
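+
+// Note on the masked overloads above (informal): the signature is
+// (mask, maskedoff, op1, shift, vl); elements whose mask bit is clear take
+// their value from maskedoff, matching the merge semantics of the
+// llvm.riscv.vssrl.mask.* intrinsics checked above.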
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
new file mode 100644
index 000000000000..9e485c4f8b4b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
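+// Informal reference (per the RVV spec; v1, v2, x, vl below are illustrative
+// names only): vssub is signed saturating subtraction, clamping to the
+// element type's range on overflow instead of wrapping. The overloaded
+// intrinsic resolves on operand types:
+//   vint8mf8_t r = vssub(v1, v2, vl); // vector-vector (vssub.vv) form
+//   vint8mf8_t s = vssub(v1, x, vl);  // vector-scalar (vssub.vx), x: int8_t
+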
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vssub(op1, op2, vl);
+}
+
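+// The vssubu tests below cover the unsigned counterpart (informal note):
+// vssubu saturates at zero on underflow, so with uint8_t elements 5 - 9
+// yields 0 rather than wrapping to 252.
+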
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vssubu(op1, op2, vl);
+}
+
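+// A brief note on the masked tests that follow (a reading aid, not part of
+// the generated checks): the _m overloads take (mask, maskedoff, op1, op2,
+// vl), and lanes whose mask bit is clear are taken from maskedoff. That is
+// why the CHECK lines below match the llvm.riscv.vssub.mask.* and
+// llvm.riscv.vssubu.mask.* intrinsics, with maskedoff as the first operand,
+// rather than the unmasked forms exercised above.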
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vssub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vssubu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
new file mode 100644
index 000000000000..31e047dbe709
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c
new file mode 100644
index 000000000000..edcddfe1919c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c
@@ -0,0 +1,6523 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
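+// Note: these tests exercise the overloaded unordered indexed-store
+// intrinsics (vsuxei8/16/32/64): `value` is stored to `base` at the byte
+// offsets held in `bindex`, with the name suffix giving the EEW of the index
+// vector. The CHECK lines verify lowering to the matching llvm.riscv.vsuxei
+// intrinsic on both riscv32 and riscv64.
+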
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
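+// Note: a wider index EEW shrinks the largest legal data LMUL, since the
+// index vector needs one offset per data element at its own larger EEW; with
+// 64-bit indices the i8 data type therefore stops at m1 below (its nxv8i64
+// index already fills an m8 register group).
+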
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
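+// With 64-bit indices and 32-bit data, the index vector occupies twice the
+// register group of the data (index EMUL = 2 * data LMUL), so the widest
+// case covered is f32m4 with a vuint64m8_t index; no f32m8 variant exists.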
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
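+// For f64 data the index types narrow accordingly: an 8-bit index runs at
+// one eighth of the data LMUL (vuint8mf8_t for f64m1 up to vuint8m1_t for
+// f64m8), keeping the element counts of value and bindex equal.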
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsuxei64(base, bindex, value, vl);
+}
+
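+// The _m tests exercise the masked overloads: the mask is the leading
+// argument, and lanes whose mask bit is clear are left unstored. A minimal
+// call sketch (names as in the tests below):
+//   vsuxei8(mask, base, bindex, value, vl); // store only active lanes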
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex,
+ vint8m4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex,
+ vint8m8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
+ vint8m4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
+ vint16m8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
+ vuint16mf4_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
+ vuint16mf2_t bindex, vint16mf2_t value,
+ size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
+ vint16m8_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
+ vuint32mf2_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
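+// The signatures in these tests follow two fixed rules: the mask type is
+// vboolN_t with N = SEW/LMUL of the value operand, and a vsuxeiEEW index
+// vector has EMUL = (EEW/SEW) * LMUL. A minimal sketch of a caller for the
+// i16mf2 case above (the helper name is illustrative, not part of the API):
+//
+//   static inline void scatter_i16mf2(vbool32_t m, int16_t *p,
+//                                     vuint64m2_t idx, vint16mf2_t v,
+//                                     size_t vl) {
+//     // 16 / (1/2) = 32 -> vbool32_t; (64/16) * (1/2) = 2 -> vuint64m2_t
+//     vsuxei64(m, p, idx, v, vl);
+//   }
+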
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint16mf4_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint32mf2_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
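+// The overloaded vsuxei* names above resolve on the value, index, and mask
+// argument types; the type-suffixed spelling matches each test's name. A
+// sketch, assuming the corresponding non-overloaded intrinsic from the same
+// intrinsics set:
+//
+//   // For the i32m4 case, vsuxei64(mask, base, bindex, value, vl) performs
+//   // the same masked indexed store as:
+//   //   vsuxei64_v_i32m4_m(mask, base, bindex, value, vl);
+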
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
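+// The unsigned tests that follow lower to the same llvm.riscv.vsuxei.mask
+// intrinsics as the signed tests above: the scalable IR types (nxv1i8 and
+// friends) carry no signedness, so only the C-level vuint*/uint*_t
+// signatures differ.
+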
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
+ vuint8m4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
+ vuint8m8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
+ vuint8m4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
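+// Coverage stops where the index EMUL would exceed 8: there is no vsuxei16
+// test for u8m8, for example, because a 16-bit index for an 8-bit m8 value
+// would require (16/8) * 8 = 16, i.e. a nonexistent LMUL-16 register group.
+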
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
+                             vuint8mf8_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
+                             vuint8mf4_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
+                             vuint8mf2_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
+                            vuint8m1_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex,
+                            vuint8m2_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
+                             vuint8mf8_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
+                             vuint8mf4_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
+                             vuint8mf2_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
+                            vuint8m1_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
+                             vuint16mf4_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
+                             vuint16mf2_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
+                            vuint16m1_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex,
+                            vuint16m2_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex,
+                            vuint16m4_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
+                            vuint16m8_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+                              vuint16mf4_t bindex, vuint16mf4_t value,
+                              size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+                              vuint16mf2_t bindex, vuint16mf2_t value,
+                              size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
+                             vuint16m1_t value, size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
+                             vuint16m2_t value, size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
+                             vuint16m4_t value, size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
+                             vuint16m8_t value, size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+                              vuint32mf2_t bindex, vuint16mf4_t value,
+                              size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+                              vuint32m1_t bindex, vuint16mf2_t value,
+                              size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
+                             vuint16m1_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
+                             vuint16m2_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
+                             vuint16m4_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+                              vuint64m1_t bindex, vuint16mf4_t value,
+                              size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+                              vuint64m2_t bindex, vuint16mf2_t value,
+                              size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
+                             vuint16m1_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
+                             vuint16m2_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
+                             vuint32mf2_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
+                            vuint32m1_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
+                            vuint32m2_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
+                            vuint32m4_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
+                            vuint32m8_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+                              vuint16mf4_t bindex, vuint32mf2_t value,
+                              size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
+                             vuint16mf2_t bindex, vuint32m1_t value,
+                             size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
+                             vuint32m2_t value, size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
+                             vuint32m4_t value, size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
+                             vuint32m8_t value, size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+                              vuint32mf2_t bindex, vuint32mf2_t value,
+                              size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
+                             vuint32m1_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
+                             vuint32m2_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
+                             vuint32m4_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
+                             vuint32m8_t value, size_t vl) {
+  return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+                              vuint64m1_t bindex, vuint32mf2_t value,
+                              size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
+                             vuint32m1_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
+                             vuint32m2_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
+                             vuint32m4_t value, size_t vl) {
+  return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
+                            vuint64m1_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
+                            vuint64m2_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
+                            vuint64m4_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
+                            vuint64m8_t value, size_t vl) {
+  return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
+                             vuint16mf4_t bindex, vuint64m1_t value,
+                             size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
+                             vuint16mf2_t bindex, vuint64m2_t value,
+                             size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
+                             vuint64m4_t value, size_t vl) {
+  return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint32mf2_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsuxei8(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsuxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsuxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsuxei64(mask, base, bindex, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
new file mode 100644
index 000000000000..744ea4d7709d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c
@@ -0,0 +1,3524 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i32(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i32(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i32(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i32(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i32(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i32(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
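[Editor's note — illustration, not part of the commit.] The tests above pair each narrow source type with a destination whose SEW and LMUL are both doubled, e.g. vint8mf2_t -> vint16m1_t and vint8m4_t -> vint16m8_t. A minimal usage sketch under that convention (the helper name widen_add_bytes is hypothetical):

#include <riscv_vector.h>
#include <stddef.h>

// Widening add: element width and register group both double, so
// i8 at LMUL=4 yields i16 at LMUL=8 (cf. test_vwadd_vv_i16m8 above).
vint16m8_t widen_add_bytes(vint8m4_t a, vint8m4_t b, size_t vl) {
  return vwadd_vv(a, b, vl);
}
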
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i32(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i32(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i32(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i32(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i32(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwadd_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
+ return vwadd_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx(op1, op2, vl);
+}
+
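[Editor's note — illustration, not part of the commit.] Each element type is exercised in four overloads: _vv (narrow vector + narrow vector), _vx (narrow vector + narrow scalar), _wv (wide vector + narrow vector), and _wx (wide vector + narrow scalar). The _wv/_wx forms keep a running sum wide without an explicit extension; a hedged sketch (dot_step is a hypothetical helper):

#include <riscv_vector.h>
#include <stddef.h>

// Accumulate narrow data into an already-wide sum, matching the
// signatures of test_vwadd_wv_i32m2 and test_vwadd_wx_i32m2 above.
vint32m2_t dot_step(vint32m2_t acc, vint16m1_t x, int16_t k, size_t vl) {
  acc = vwadd_wv(acc, x, vl);  // wide i32 + narrow i16 vector
  return vwadd_wx(acc, k, vl); // wide i32 + narrow i16 scalar
}
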
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
+                                   size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
+                                   size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i32(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
+                                   size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
+                                   size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i32(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i32(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i32(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i32(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i32(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
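[Editor's note — illustration, not part of the commit.] vwaddu differs from vwadd only in that the narrow operands are zero-extended rather than sign-extended, which is why the unsigned tests mirror the signed ones type-for-type. Sketch (sum_pixels is a hypothetical helper):

#include <riscv_vector.h>
#include <stddef.h>

// Zero-extending widening add: u8 pixels accumulate into u16 lanes
// (cf. test_vwaddu_wv_u16m2 above).
vuint16m2_t sum_pixels(vuint16m2_t acc, vuint8m1_t px, size_t vl) {
  return vwaddu_wv(acc, px, vl);
}
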
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
+                                   size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
+                                   size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i32(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
+                                 size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i32(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i32(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i32(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i32(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
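+// From here the element width doubles from 32 to 64. The mangled intrinsic
+// names in the checks encode the types, e.g. in
+//   @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64
+// the suffixes read <result>.<op1>.<op2>.<vl type>; the vl type is XLEN
+// sized, which is the only difference between the RV32 (.i32) and RV64
+// (.i64) check lines throughout this file.
+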
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwaddu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
+ return vwaddu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx(op1, op2, vl);
+}
+
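+// Masked (_m) variants follow, switching to the signed vwadd forms, which
+// sign-extend their narrow operands where vwaddu zero-extends. The
+// overloaded masked call prepends a mask and a maskedoff argument and
+// lowers to the ".mask" intrinsic, with the mask as <vscale x N x i1>.
+// A scalar sketch of the intended body semantics (assuming the v0.10
+// behaviour that inactive elements keep their maskedoff value):
+//   for (size_t i = 0; i < vl; ++i)
+//     res[i] = mask[i] ? (int16_t)op1[i] + (int16_t)op2[i] : maskedoff[i];
+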
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
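+// Note the mask type: vboolN_t is keyed to the SEW/LMUL ratio N rather than
+// to a single element type, so vbool64_t serves i16mf4 here (16 / (1/4) =
+// 64) just as it served the i8mf8 operands above, and the groups below move
+// through vbool32_t, vbool16_t, ... down to vbool2_t for i16m8.
+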
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint8m1_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
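+// Hypothetical usage sketch (not part of the generated tests): the same
+// overloaded i8m1 -> i16m2 widening add in a strip-mined loop, assuming
+// <riscv_vector.h> and the unmasked load/store intrinsics of this API era:
+//   void widen_add(int16_t *dst, const int8_t *a, const int8_t *b,
+//                  size_t n) {
+//     for (size_t vl; n > 0; n -= vl, a += vl, b += vl, dst += vl) {
+//       vl = vsetvl_e8m1(n);                      // elements this strip
+//       vint8m1_t va = vle8_v_i8m1(a, vl);
+//       vint8m1_t vb = vle8_v_i8m1(b, vl);
+//       vse16_v_i16m2(dst, vwadd_vv(va, vb, vl), vl);
+//     }
+//   }
+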
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint8m2_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint8m4_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint16m1_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint16m2_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint16m4_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint32m1_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint32m2_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwadd_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint32m4_t op2, size_t vl) {
+ return vwadd_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx(mask, maskedoff, op1, op2, vl);
+}
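+
+// Not part of the generated checks: a hedged sketch of the semantics the
+// masked tests above exercise. For each lane i < vl with mask[i] set, the
+// result is the widening sum; every other lane is taken from maskedoff
+// (assuming the mask/tail-undisturbed policy these intrinsics used):
+//
+//   result[i] = mask[i] ? (wide)op1[i] + (wide)op2[i] : maskedoff[i]; // vv/vx
+//   result[i] = mask[i] ? op1[i] + (wide)op2[i]       : maskedoff[i]; // wv/wx
+//
+// where (wide) is sign-extension for vwadd and zero-extension for vwaddu,
+// matching the unsigned tests that follow.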
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+}
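
The masked vwaddu tests above exercise four overloaded forms that differ only in operand width: vv (narrow vector + narrow vector), vx (narrow vector + narrow scalar), wv (wide vector + narrow vector), and wx (wide vector + narrow scalar). As a minimal sketch of how the overloaded API composes — assuming only the intrinsics and signatures demonstrated in these tests; the helper name and the bias parameter are illustrative, not part of the suite:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Sketch only: inactive lanes (mask bit clear) take the value of the
// maskedoff operand, matching the masked signatures checked above.
static inline vuint32m2_t widen_accumulate(vbool16_t mask, vuint32m2_t acc,
                                           vuint16m1_t a, vuint16m1_t b,
                                           uint16_t bias, size_t vl) {
  vuint32m2_t sum = vwaddu_vv(mask, acc, a, b, vl); // vv form: u16 + u16 -> u32
  return vwaddu_wx(mask, sum, sum, bias, vl);       // wx form: u32 + u16 -> u32
}
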
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
new file mode 100644
index 000000000000..c2a99cb088c7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
@@ -0,0 +1,3186 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
+ vint8mf8_t op2, size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
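
vwmacc is a widening multiply-accumulate: each lane computes acc + op1 * op2 with the product widened to the accumulator's element width (sign-extended here, since these are the signed variants). A short sketch under the same assumptions as above, using only the overloaded unmasked forms checked in this file; the helper name and scale parameter are hypothetical:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Sketch only: one step of a widening dot product, i8 x i8 -> i16,
// with types mirroring test_vwmacc_v?_i16mf4 above.
static inline vint16mf4_t dot_step(vint16mf4_t acc, vint8mf8_t a,
                                   vint8mf8_t b, int8_t scale, size_t vl) {
  acc = vwmacc(acc, a, b, vl);      // vv form: acc += sext(a) * sext(b)
  return vwmacc(acc, scale, b, vl); // vx form: acc += sext(scale) * sext(b)
}
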
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
+ vint8mf4_t op2, size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
+ vint16mf4_t op2, size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
+ vint16mf2_t op2, size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
+ vint32mf2_t op2, size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
+ size_t vl) {
+ return vwmacc(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
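
The vwmaccu tests repeat the same pattern for the unsigned instruction, where multiplicands and product are zero-extended instead. For completeness, the unsigned counterpart of the sketch above (again hypothetical, using only the overloaded forms these two tests check):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Sketch only: u8 x u8 -> u16 widening multiply-accumulate.
static inline vuint16mf4_t udot_step(vuint16mf4_t acc, vuint8mf8_t a,
                                     vuint8mf8_t b, uint8_t k, size_t vl) {
  acc = vwmaccu(acc, a, b, vl);  // vv form: acc += zext(a) * zext(b)
  return vwmaccu(acc, k, b, vl); // vx form: acc += zext(k) * zext(b)
}
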
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
+ vuint8mf2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
+ vuint8m1_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
+ size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
+ vuint8m2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
+ size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
+ vuint8m4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
+ size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vwmaccu(acc, op1, op2, vl);
+}
+
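The vwmaccu tests above exercise the overloaded unsigned widening multiply-accumulate: each lane computes acc[i] += op1[i] * op2[i], with the product widened to 2*SEW and accumulated into a register group of twice the operand LMUL (e.g. vuint8m1_t operands into a vuint16m2_t accumulator, as in test_vwmaccu_vv_u16m2). A minimal usage sketch, not taken from this diff, assuming <riscv_vector.h> and this era's non-prefixed names for the non-overloaded vsetvl/load/store intrinsics (vsetvl_e8m1, vle8_v_u8m1, vle16_v_u16m2, vse16_v_u16m2):

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// acc[i] += (uint16_t)a[i] * (uint16_t)b[i] over n elements, strip-mined
// on the hardware vector length; mirrors the operand types of
// test_vwmaccu_vv_u16m2 above.
void widening_mac_u8(uint16_t *acc, const uint8_t *a, const uint8_t *b,
                     size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e8m1(n - i);                // lanes this iteration
    vuint8m1_t va = vle8_v_u8m1(a + i, vl);        // narrow operands
    vuint8m1_t vb = vle8_v_u8m1(b + i, vl);
    vuint16m2_t vacc = vle16_v_u16m2(acc + i, vl); // wide accumulator
    vacc = vwmaccu(vacc, va, vb, vl);              // overloaded form under test
    vse16_v_u16m2(acc + i, vacc, vl);
    i += vl;
  }
}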
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
+ vuint8mf2_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwmaccsu(acc, op1, op2, vl);
+}
+
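The vwmaccsu tests mix signedness: op1 is signed and op2 unsigned, so each lane computes acc[i] += (signed)op1[i] * (unsigned)op2[i], widened to 2*SEW. The vwmaccus tests that follow cover the complementary vector-scalar form, an unsigned scalar multiplied by a signed vector; as the tests show, only the vx form exists for vwmaccus, matching the instruction set. A sketch of both overloaded calls, reusing the operand types from the tests:

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Signed vector x unsigned vector: mirrors test_vwmaccsu_vv_i16m1 above.
vint16m1_t mac_su(vint16m1_t acc, vint8mf2_t s, vuint8mf2_t u, size_t vl) {
  return vwmaccsu(acc, s, u, vl);
}

// Unsigned scalar x signed vector: mirrors test_vwmaccus_vx_i16m1 above.
vint16m1_t mac_us(vint16m1_t acc, uint8_t s, vint8mf2_t v, size_t vl) {
  return vwmaccus(acc, s, v, vl);
}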
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
+ vint8mf8_t op2, size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
+ vint8mf4_t op2, size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
+ vint16mf4_t op2, size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
+ size_t vl) {
+ return vwmaccus(acc, op1, op2, vl);
+}
+
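From here the file moves to the masked (_m) variants. In the overloaded form the mask comes first, as in vwmacc(mask, acc, op1, op2, vl), and the generated IR (the .mask intrinsics above take acc with no separate merge operand) indicates that inactive lanes are taken from the accumulator. A minimal sketch of the masked call, matching the signature of test_vwmacc_vv_i16mf4_m below:

#include <riscv_vector.h>
#include <stddef.h>

// Masked widening MAC: only lanes with mask set are updated; lanes with
// the mask clear keep the accumulator's value.
vint16mf4_t masked_mac(vbool64_t m, vint16mf4_t acc, vint8mf8_t a,
                       vint8mf8_t b, size_t vl) {
  return vwmacc(m, acc, a, b, vl);
}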
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
+ vint8mf8_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
+ vint8mf4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
+ vint8mf2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
+ vint8m1_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
+ vint8m1_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
+ vint8m2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
+ vint8m2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
+ vint8m4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
+ vint8m4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
+ vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
+ vint16mf2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
+ vint16m1_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
+ vint16m2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
+ vint16m2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
+ vint16m4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
+ vint16m4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
+ vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
+ vint32mf2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
+ vint32m1_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
+ vint32m2_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
+ vint32m4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
+ vint32m4_t op2, size_t vl) {
+ return vwmacc(mask, acc, op1, op2, vl);
+}
+
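+// Masked vwmaccu tests: the same widening multiply-add, but with unsigned
+// operands accumulating into an unsigned 2*SEW destination.
+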
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
+ uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
+ uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
+ uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
+ vuint8m1_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
+ vuint8m2_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
+ vuint8m4_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
+ uint16_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
+ uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
+ uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
+ uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
+ uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
+ uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
+ uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
+ uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
+ uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmaccu(mask, acc, op1, op2, vl);
+}
+
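+// Masked vwmaccsu tests: signed op1 multiplied by unsigned op2, accumulated
+// into a signed 2*SEW destination. (vwmaccus, covered by the unmasked tests
+// above, is the .vx-only unsigned-scalar * signed-vector counterpart.)
+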
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
+ vint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
+ int8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
+ vint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
+ int8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
+ vint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
+ vuint8mf2_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
+ vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
+ vuint8m1_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
+ vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
+ vuint8m2_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
+ vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
+ vuint8m4_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ vint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ int16_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
+ vint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
+ vint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
+ vint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
+ vint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
+ vint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
+ vint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
+ vint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
+ vint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vwmaccsu(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
+ uint8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
+ uint8_t op1, vint8mf4_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
+ vint8mf2_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
+ vint8m1_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
+ vint8m2_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
+ vint8m4_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ uint16_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
+ uint16_t op1, vint16mf2_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
+ uint16_t op1, vint16m1_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
+ vint16m2_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
+ vint16m4_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
+ uint32_t op1, vint32mf2_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
+ uint32_t op1, vint32m1_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
+ uint32_t op1, vint32m2_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1,
+ vint32m4_t op2, size_t vl) {
+ return vwmaccus(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
new file mode 100644
index 000000000000..ea2e12fef8cf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
@@ -0,0 +1,2528 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwmul(op1, op2, vl);
+}
+
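+// vwmulu: unsigned widening multiply. Each product is 2*SEW wide, so the
+// destination runs at twice the LMUL of the sources (e.g. two vuint8mf8_t
+// inputs yield a vuint16mf4_t result).
+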
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwmulu(op1, op2, vl);
+}
+
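+// vwmulsu: widening multiply of signed op1 by unsigned op2 (an unsigned
+// scalar for the .vx form), producing a signed result of twice the SEW.
+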
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu(op1, op2, vl);
+}
+
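+// Masked variants: the overloaded call takes (mask, maskedoff, op1, op2, vl)
+// and lowers to the llvm.riscv.vwmul*.mask.* intrinsics, with inactive
+// elements taken from maskedoff.
+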
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwmul(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwmulu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
new file mode 100644
index 000000000000..ef6d793ade34
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
@@ -0,0 +1,1124 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
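+// Note: vwredsum/vwredsumu are widening reductions: conceptually,
+// dst[0] = scalar[0] + sum(vector[0..vl-1]), with each vector element
+// sign- (resp. zero-) extended to 2*SEW before the accumulation.
+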
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(dst, vector, scalar, vl);
+}
+
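+// Masked variants: the overloaded call takes the mask first, then the merge
+// destination, the source vector, the scalar accumulator, and vl, i.e.
+// vwredsum(mask, dst, vector, scalar, vl).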
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint8mf8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint8mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint8mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint8m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint8m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint8m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst,
+ vint8m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint16mf4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint16mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint16m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint16m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint16m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst,
+ vint16m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint32mf2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint32m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint32m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint32m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst,
+ vint32m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vwredsum(mask, dst, vector, scalar, vl);
+}
+
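+// Masked unsigned variants follow the same convention:
+// vwredsumu(mask, dst, vector, scalar, vl).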
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint8mf8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint8mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint8mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint8m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint8m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint8m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst,
+ vuint8m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint16mf4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint16mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint16m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint16m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint16m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst,
+ vuint16m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint32mf2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint32m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint32m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint32m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst,
+ vuint32m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
new file mode 100644
index 000000000000..9c483f42c82b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
@@ -0,0 +1,3524 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
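+// The tests below cover the four overloaded vwsub forms: vwsub_vv
+// (vector - vector), vwsub_vx (vector - scalar), vwsub_wv (wide vector -
+// vector), and vwsub_wx (wide vector - scalar). In every form the result
+// element width is double that of the narrow operand.
+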
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i32(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i32(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i32(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i32(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i32(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i32(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i32(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i32(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i32(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i32(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i32(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwsub_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
+ return vwsub_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx(op1, op2, vl);
+}
+
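+// The remaining tests mirror the signed forms for vwsubu, the unsigned
+// widening subtract: both operands are zero-extended to 2*SEW before the
+// subtraction, and the _vv/_vx/_wv/_wx overloads follow the same pattern
+// over the unsigned element types.
+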
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i32(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i32(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i32(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i32(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i32(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i32(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i32(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i32(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i32(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i32(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i32(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwsubu_vv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
+ return vwsubu_wv(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint8m1_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint8m2_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint8m4_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint16m1_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint16m2_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint16m4_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint32m1_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint32m2_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwsub_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint32m4_t op2, size_t vl) {
+ return vwsub_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwsubu_vv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+}
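
A minimal usage sketch (not part of the commit) of the overloaded masked vwsubu intrinsics exercised by the tests above. The intrinsic names come from <riscv_vector.h> exactly as the tests use them; the driver loop, the buffer names, and the vmsne-derived mask are illustrative assumptions, not anything this patch adds, and building it would need the same experimental-v toolchain the RUN lines require.

// Illustrative only -- not part of the diff. Widens u16 elements to u32
// while subtracting a scalar bias; inactive lanes (sel == 0) keep their
// previous dst values via the maskedoff operand.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void widening_sub_masked(uint32_t *dst, const uint16_t *src,
                         const uint16_t *sel, uint16_t bias, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e16m4(n - i);               // elements this iteration
    vuint16m4_t op1 = vle16_v_u16m4(src + i, vl);  // u16 inputs
    vuint16m4_t s = vle16_v_u16m4(sel + i, vl);
    vbool4_t mask = vmsne_vx_u16m4_b4(s, 0, vl);   // active where sel != 0
    vuint32m8_t off = vle32_v_u32m8(dst + i, vl);  // maskedoff = old dst
    // Overloaded form; resolves on argument types to vwsubu_vx_u32m8_m.
    vuint32m8_t r = vwsubu_vx(mask, off, op1, bias, vl);
    vse32_v_u32m8(dst + i, r, vl);
    i += vl;
  }
}

The overloaded call vwsubu_vx(mask, maskedoff, op1, op2, vl) dispatches on its argument types to the type-suffixed intrinsic the CHECK lines verify (vwsubu_vx_u32m8_m for the vbool4_t/vuint32m8_t/vuint16m4_t combination used here).
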
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
new file mode 100644
index 000000000000..42ba2e276a8b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vxor(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR8]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vxor(mask, maskedoff, op1, op2, vl);
+}
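Usage sketch (illustrative only, not part of this patch): the overloaded
intrinsics above resolve on operand types, so a caller never spells out the
type suffix. The signatures follow the mask-type convention vboolN_t with
N = SEW/LMUL (vbool64_t for i64m1, vbool4_t for i32m8, and so on). A
hypothetical helper such as compute_masked_xor below assumes only
<riscv_vector.h> and the overloaded API these tests exercise:

#include <riscv_vector.h>

// Hypothetical helper, not part of the patch: XOR-combine two i32m1
// vectors where `mask` is set; inactive elements keep `fallback`.
vint32m1_t compute_masked_xor(vbool32_t mask, vint32m1_t fallback,
                              vint32m1_t a, vint32m1_t b, size_t vl) {
  // Resolves to vxor_vv_i32m1_m: SEW=32, LMUL=1, so the mask ratio is
  // 32 and the matching mask type is vbool32_t.
  return vxor(mask, fallback, a, b, vl);
}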
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
new file mode 100644
index 000000000000..2dc9d3d7824d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
@@ -0,0 +1,820 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
+ return vzext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
+ return vzext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
+ return vzext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
+ return vzext_vf8(op1, vl);
+}
+
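+// Illustrative sketch (not autogenerated): the overloaded vzext_vf2/_vf4/
+// _vf8 calls above resolve on the operand type, and the suffix names the
+// widening factor, so an 8-bit source becomes 16, 32 or 64 bits wide. The
+// strip-mining loop below is an assumption for illustration only; the
+// load/store/vsetvl helpers are the same-era non-overloaded intrinsics.
+static inline void zext_u8_to_u32_sketch(uint32_t *dst, const uint8_t *src,
+                                         size_t n) {
+  for (size_t i = 0; i < n;) {
+    size_t vl = vsetvl_e8m1(n - i);          // elements this iteration
+    vuint8m1_t v = vle8_v_u8m1(src + i, vl); // load u8 lanes
+    vuint32m4_t w = vzext_vf4(v, vl);        // zero-extend 8 -> 32 bits
+    vse32_v_u32m4(dst + i, w, vl);           // store widened lanes
+    i += vl;
+  }
+}
+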
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
+ return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
+ return vzext_vf2(op1, vl);
+}
+
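+// Illustrative sketch (not autogenerated): the masked overloads tested
+// below take (mask, maskedoff, op1, vl). Our reading is that active lanes
+// are zero-extended from op1 while inactive lanes keep the corresponding
+// maskedoff element; this hypothetical wrapper only restates that calling
+// convention with one of the type combinations exercised below.
+static inline vuint16m1_t zext_masked_sketch(vbool16_t mask,
+                                             vuint16m1_t maskedoff,
+                                             vuint8mf2_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl); // inactive lanes <- maskedoff
+}
+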
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint8mf8_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint8mf4_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint8mf2_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint8m1_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint8m2_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint8mf8_t op1, size_t vl) {
+ return vzext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint8mf4_t op1, size_t vl) {
+ return vzext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint8mf2_t op1, size_t vl) {
+ return vzext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint8m1_t op1, size_t vl) {
+ return vzext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint16mf4_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint16mf2_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint16m1_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint16m2_t op1, size_t vl) {
+ return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, size_t vl) {
+ return vzext_vf2(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
new file mode 100644
index 000000000000..b7a9ee6bcdf5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
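+// Note (not autogenerated): vaadd is the single-width averaging add. Per
+// our reading of the V spec, each active element computes roughly
+// (op1 + op2) >> 1, with rounding of the shifted-out bit controlled by the
+// vxrm CSR; the checks in this file only pin down the emitted IR, not that
+// behaviour. A hedged usage sketch with this file's non-overloaded naming:
+static inline vint8m1_t avg_i8_sketch(vint8m1_t a, vint8m1_t b, size_t vl) {
+  return vaadd_vv_i8m1(a, b, vl); // the intermediate sum cannot overflow
+}
+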
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vaadd_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8mf8(op1, op2, vl);
+}
+
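+// Note (not autogenerated): the _vv forms above take two vectors, while
+// the _vx forms take a vector and a scalar; the scalar op2 is splat across
+// all active elements, which is why the second intrinsic argument in the
+// checks is a plain i8 rather than a scalable vector.
+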
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vaadd_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vaadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vaadd_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vaadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vaadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vaadd_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vaadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vaadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vaadd_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vaadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vaadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vaadd_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vaadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vaadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vaadd_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vaadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vaadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vaadd_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vaadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vaadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vaadd_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vaadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vaadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vaadd_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vaadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vaadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vaadd_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vaadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vaadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vaadd_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vaadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vaadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vaadd_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vaadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vaadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vaadd_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vaadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vaadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vaadd_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vaadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vaadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vaadd_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vaadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vaadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vaadd_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vaadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vaadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vaadd_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vaadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vaadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vaadd_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vaadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vaadd_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vaadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vaadd_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vaadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vaadd_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vaadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vaadd_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vaadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vaadd_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vaadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vaadd_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vaadd_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vaaddu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vaaddu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vaaddu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vaaddu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vaaddu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vaaddu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vaaddu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vaaddu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vaaddu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vaaddu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vaaddu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vaaddu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vaaddu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vaaddu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vaaddu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vaaddu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vaaddu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vaaddu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vaaddu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vaaddu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vaaddu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vaaddu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vaadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vaadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vaadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vaadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vaadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vaadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vaadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vaadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vaadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vaadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vaadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vaadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vaadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vaadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vaadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vaadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vaadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vaadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vaadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vaadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vaadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vaadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vaadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vaadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vaadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vaadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vaadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vaadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vaadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
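+// A scalar reference for the signed averaging add exercised above (an
+// illustrative sketch only, not part of the generated checks; it assumes
+// the default fixed-point rounding mode vxrm = rnu, round-to-nearest-up,
+// and an arithmetic right shift on signed values):
+//
+//   static inline int8_t aadd_i8(int8_t a, int8_t b) {
+//     int16_t sum = (int16_t)a + (int16_t)b; // widen so the add cannot overflow
+//     return (int8_t)((sum + 1) >> 1);       // halve with round-to-nearest-up
+//   }
+//
+// The _m variants additionally take a mask and a maskedoff operand: elements
+// where mask[i] is clear keep maskedoff[i] instead of the computed average,
+// which is why maskedoff is passed through to the llvm.riscv.vaadd.mask
+// intrinsics in the check lines above.
+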
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vaaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vaaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vaaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vaaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vaaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vaaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vaaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vaaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
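+// The unsigned vaaddu tests follow the same pattern with unsigned element
+// types; a matching scalar sketch (same illustrative vxrm = rnu assumption)
+// uses unsigned widening and a logical shift:
+//
+//   static inline uint8_t aaddu_u8(uint8_t a, uint8_t b) {
+//     uint16_t sum = (uint16_t)a + (uint16_t)b; // widen: no wraparound
+//     return (uint8_t)((sum + 1) >> 1);         // logical shift halves
+//   }
+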
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vaaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vaaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vaaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vaaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vaaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vaaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vaaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vaaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vaaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vaaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vaaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vaaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vaaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vaaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vaaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vaaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vaaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vaaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vaaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vaaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vaaddu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vaaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
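+
+// Illustrative scalar-equivalent sketch of the averaging unsigned add
+// exercised by the masked tests above (a hedged reading aid, not part
+// of the autogenerated checks). For each element index i < vl:
+//   active (mask[i] set):   result[i] = (op1[i] + op2[i]) >> 1,
+//                           rounded according to the current vxrm mode;
+//   inactive (mask[i] off): result[i] = maskedoff[i].
+// The _vx forms behave identically with op2 splat from a scalar. A
+// hypothetical per-element helper, assuming round-to-nearest-up (rnu):
+//   static inline uint32_t ref_vaaddu(uint32_t a, uint32_t b) {
+//     return (uint32_t)(((uint64_t)a + b + 1) >> 1);
+//   }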
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
new file mode 100644
index 000000000000..01a4a6f303e9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadc.c
@@ -0,0 +1,1328 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
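+
+// Illustrative scalar-equivalent sketch of vadc (add-with-carry), to
+// read alongside the autogenerated checks below; the helper is
+// hypothetical and not part of the test. For each element index i < vl:
+//   result[i] = op1[i] + op2[i] + (carryin[i] ? 1 : 0)
+// The _vvm forms take a vector op2; the _vxm forms splat a scalar op2.
+//   static inline int8_t ref_vadc(int8_t a, int8_t b, _Bool c) {
+//     return (int8_t)((uint8_t)a + (uint8_t)b + (c ? 1u : 0u));
+//   }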
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vvm_i8mf8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vadc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t carryin,
+ size_t vl) {
+ return vadc_vxm_i8mf8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vvm_i8mf4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vadc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t carryin,
+ size_t vl) {
+ return vadc_vxm_i8mf4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vvm_i8mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vadc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc_vxm_i8mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vvm_i8m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vxm_i8m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vvm_i8m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vadc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vxm_i8m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc_vvm_i8m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vadc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc_vxm_i8m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vadc_vvm_i8m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vadc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vadc_vxm_i8m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vvm_i16mf4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vadc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vxm_i16mf4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vvm_i16mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vadc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vxm_i16mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vvm_i16m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vadc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc_vxm_i16m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vvm_i16m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vadc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vxm_i16m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vvm_i16m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vadc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vxm_i16m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc_vvm_i16m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vadc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc_vxm_i16m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vvm_i32mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vadc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vxm_i32mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vvm_i32m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vadc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t carryin,
+ size_t vl) {
+ return vadc_vxm_i32m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vvm_i32m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vadc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc_vxm_i32m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vvm_i32m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vadc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vxm_i32m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vvm_i32m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vadc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vxm_i32m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vvm_i64m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vadc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t carryin,
+ size_t vl) {
+ return vadc_vxm_i64m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vvm_i64m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vadc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t carryin,
+ size_t vl) {
+ return vadc_vxm_i64m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vvm_i64m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vadc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc_vxm_i64m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vvm_i64m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vadc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vxm_i64m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vvm_u8mf8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vadc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t carryin,
+ size_t vl) {
+ return vadc_vxm_u8mf8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vvm_u8mf4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vadc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t carryin,
+ size_t vl) {
+ return vadc_vxm_u8mf4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vvm_u8mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vadc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t carryin,
+ size_t vl) {
+ return vadc_vxm_u8mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vvm_u8m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vadc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vxm_u8m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vvm_u8m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vadc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vxm_u8m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc_vvm_u8m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vadc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc_vxm_u8m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vadc_vvm_u8m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vadc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vadc_vxm_u8m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vvm_u16mf4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vadc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vxm_u16mf4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vvm_u16mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vadc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vxm_u16mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vvm_u16m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vadc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vxm_u16m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vadc_vvm_u16m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vadc_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vxm_u16m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vadc_vvm_u16m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vadc_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vxm_u16m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vadc_vvm_u16m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vadc_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vadc_vxm_u16m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vvm_u32mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vadc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vxm_u32mf2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vvm_u32m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vadc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vxm_u32m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vvm_u32m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vadc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vxm_u32m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vadc_vvm_u32m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vadc_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vxm_u32m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vadc_vvm_u32m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vadc_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vadc_vxm_u32m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vvm_u64m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vadc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vadc_vxm_u64m1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vvm_u64m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vadc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vadc_vxm_u64m2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vvm_u64m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vadc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vadc_vxm_u64m4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vvm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vvm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vadc_vvm_u64m8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vadc_vxm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vadc_vxm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vadc_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vadc_vxm_u64m8(op1, op2, carryin, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
index d3d73e8d7bba..902f5e0bf2ed 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
-// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
new file mode 100644
index 000000000000..33f2836b5cda
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vand_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vand_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vand_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vand_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vand_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vand_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vand_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vand_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vand_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vand_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vand_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vand_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vand_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vand_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vand_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vand_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vand_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vand_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vand_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vand_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vand_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vand_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vand_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vand_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vand_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vand_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vand_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vand_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vand_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vand_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vand_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vand_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vand_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vand_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vand_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vand_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vand_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vand_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vand_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vand_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vand_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vand_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vand_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vand_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vand_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vand_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vand_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vand_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vand_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vand_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vand_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vand_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vand_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vand_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vand_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vand_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vand_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vand_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vand_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vand_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vand_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vand_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vand_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vand_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vand_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vand_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vand_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vand_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vand_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vand_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vand_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vand_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vand.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vand_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vand.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vand_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vand.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vand_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vand.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vand_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vand.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vand_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vand.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vand_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vand_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vand.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vand_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vand.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vand_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vand.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vand_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vand.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vand_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vand.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vand_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vand.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vand_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vand_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vand.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vand_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vand.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vand_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vand.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vand_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vand.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vand_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vand.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vand_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vand_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vand.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vand_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vand.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vand_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vand.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vand_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vand.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vand_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
new file mode 100644
index 000000000000..c7e376be4d80
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
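+
+// vasub/vasubu are the RVV single-width averaging-subtract intrinsics:
+// per the V extension spec they compute (op1 - op2) >> 1, with the
+// rounding behaviour taken from the vxrm CSR. The _vv variants take two
+// vectors and the _vx variants a vector and a scalar; the type suffix
+// (e.g. i8mf8, u32m4) encodes the element type and LMUL. The ASM RUN
+// line above only verifies that compiling to assembly emits no warnings.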
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vasub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vasub_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vasub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vasub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vasub_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vasub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vasub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vasub_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vasub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vasub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vasub_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vasub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vasub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vasub_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vasub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vasub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vasub_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vasub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vasub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vasub_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vasub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vasub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vasub_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vasub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vasub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vasub_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vasub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vasub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vasub_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vasub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vasub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vasub_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vasub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vasub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vasub_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vasub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vasub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vasub_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vasub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vasub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vasub_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vasub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vasub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vasub_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vasub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vasub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vasub_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vasub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vasub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vasub_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vasub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vasub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vasub_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vasub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32m8(op1, op2, vl);
+}
+
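+// Note: for the i64 element types below, the _vx forms pass the full
+// 64-bit scalar to the intrinsic even on RV32; only the VL operand
+// narrows to i32 (compare the .i64.i32 vs .i64.i64 intrinsic suffixes).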
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vasub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vasub_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vasub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vasub_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vasub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vasub_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vasub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vasub_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vasub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vasub_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vasub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vasub_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vasub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vasub_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vasub_vx_i64m8(op1, op2, vl);
+}
+
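+// The vasubu_* tests below mirror the signed cases above for unsigned
+// element types, exercising the llvm.riscv.vasubu intrinsic (unsigned
+// averaging subtract) with the same vv/vx and LMUL coverage.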
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vasubu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vasubu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vasubu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vasubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vasubu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vasubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vasubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vasubu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vasubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vasubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vasubu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vasubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vasubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vasubu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vasubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vasubu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vasubu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vasubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vasubu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vasubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vasubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vasubu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vasubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vasubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vasubu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vasubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vasubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vasubu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vasubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vasubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vasubu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vasubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vasubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vasubu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vasubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vasubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vasubu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vasubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vasubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vasubu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vasubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vasubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vasubu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vasubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vasubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vasubu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vasubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vasubu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vasubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vasubu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vasubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vasubu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vasubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vasubu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vasubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vasubu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vasubu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vasubu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vasub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vasub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vasub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vasub_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vasub_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vasub_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vasub_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vasub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vasub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vasub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vasub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vasub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vasub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vasub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vasub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vasub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vasub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vasub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vasub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vasub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vasub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vasub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vasub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vasub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vasub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vasub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vasub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vasub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vasub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vasubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vasubu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vasubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vasubu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vasubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vasubu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vasubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vasubu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vasubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vasubu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vasubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vasubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vasubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vasubu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vasubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vasubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vasubu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vasubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vasubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vasubu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vasubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vasubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vasubu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vasubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vasubu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vasubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vasubu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vasubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vasubu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vasubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vasubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vasubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vasubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vasubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vasubu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vasubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vasubu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vasubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vasubu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vasubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vasubu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vasubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vasubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vasubu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vasubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vasubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vasubu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vasubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vasubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vasubu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vasubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vasubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vasubu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vasubu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vasubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
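+
+// `vasubu` is the unsigned averaging subtract from the RVV fixed-point
+// group: each result element is roughly (op1 - op2) >> 1, with rounding
+// of the shifted-out bit controlled by the `vxrm` CSR.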
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
new file mode 100644
index 000000000000..3f2138a44fce
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
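+
+// Naming convention exercised below: `vdiv_vv_<ty>` divides two vectors
+// element-wise and `vdiv_vx_<ty>` divides a vector by a scalar; `<ty>`
+// encodes element width and LMUL (e.g. `i8mf8` is 8-bit elements at
+// LMUL=1/8). A minimal usage sketch, assuming the same intrinsics the
+// tests below exercise:
+//
+//   vint32m1_t quotient(vint32m1_t a, vint32m1_t b, size_t vl) {
+//     return vdiv_vv_i32m1(a, b, vl); // signed element-wise a / b
+//   }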
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vdiv_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vdiv_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vdiv_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vdiv_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vdiv_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vdiv_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vdiv_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vdiv_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vdiv_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vdiv_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vdiv_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vdiv_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vdiv_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vdiv_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vdiv_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vdiv_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vdiv_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vdiv_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vdiv_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vdiv_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vdivu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vdivu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vdivu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vdivu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vdivu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vdivu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vdivu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vdivu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vdivu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vdivu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vdivu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vdivu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vdivu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vdivu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vdivu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vdivu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vdivu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vdivu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vdivu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vdivu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vdivu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vdivu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vdiv_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vdiv_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdiv.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vdiv_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdiv.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vdiv_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdiv.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vdiv_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdiv.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vdiv_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdiv.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vdiv_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdiv.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vdiv_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vdiv_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdiv.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vdiv_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdiv.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vdiv_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdiv.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vdiv_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdiv.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vdiv_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdiv.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vdiv_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdiv.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vdiv_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vdiv_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdiv.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vdiv_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdiv.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vdiv_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdiv.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vdiv_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdiv.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vdiv_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdiv.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vdiv_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vdiv_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdiv.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vdiv_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdiv.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vdiv_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdiv.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vdiv_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdiv_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdiv.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vdiv_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vdivu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vdivu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vdivu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vdivu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vdivu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vdivu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vdivu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vdivu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vdivu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vdivu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vdivu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vdivu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vdivu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vdivu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vdivu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vdivu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vdivu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vdivu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vdivu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vdivu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vdivu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vdivu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vdivu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vdivu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vdivu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vdivu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vdivu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vdivu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vdivu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vdivu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vdivu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vdivu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vdivu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vdivu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vdivu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vdivu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vdivu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vdivu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vdivu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vdivu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vdivu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vdivu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vdivu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vdivu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vdivu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vdivu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vdivu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vdivu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vdivu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
index 2c8f0e54afcf..43be374794d4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -target-feature +experimental-zfh -Werror -Wall -o - %s >/dev/null 2>%t
-// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
new file mode 100644
index 000000000000..8068a02b0c39
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
+ return vfclass_v_u32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
+ return vfclass_v_u32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
+ return vfclass_v_u32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
+ return vfclass_v_u32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
+ return vfclass_v_u32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
+ return vfclass_v_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
+ return vfclass_v_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
+ return vfclass_v_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
+ return vfclass_v_u64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t vl) {
+ return vfclass_v_u32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vfloat32m1_t op1, size_t vl) {
+ return vfclass_v_u32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vfloat32m2_t op1, size_t vl) {
+ return vfclass_v_u32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat32m4_t op1, size_t vl) {
+ return vfclass_v_u32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vfloat32m8_t op1, size_t vl) {
+ return vfclass_v_u32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vfloat64m1_t op1, size_t vl) {
+ return vfclass_v_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vfloat64m2_t op1, size_t vl) {
+ return vfclass_v_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vfloat64m4_t op1, size_t vl) {
+ return vfclass_v_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfclass_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat64m8_t op1, size_t vl) {
+ return vfclass_v_u64m8_m(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
new file mode 100644
index 000000000000..87f0a972be38
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
@@ -0,0 +1,1579 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
+ return vfcvt_x_f_v_i32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
+ return vfcvt_x_f_v_i32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
+ return vfcvt_x_f_v_i32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
+ return vfcvt_x_f_v_i32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
+ return vfcvt_x_f_v_i32m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
+ return vfcvt_f_x_v_f32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
+ return vfcvt_f_x_v_f32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
+ return vfcvt_f_x_v_f32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
+ return vfcvt_f_x_v_f32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
+ return vfcvt_f_x_v_f32m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
+ return vfcvt_x_f_v_i64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
+ return vfcvt_x_f_v_i64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
+ return vfcvt_x_f_v_i64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
+ return vfcvt_x_f_v_i64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
+ return vfcvt_xu_f_v_u64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
+ return vfcvt_xu_f_v_u64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
+ return vfcvt_xu_f_v_u64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
+ return vfcvt_xu_f_v_u64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
+ return vfcvt_f_x_v_f64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
+ return vfcvt_f_x_v_f64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
+ return vfcvt_f_x_v_f64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
+ return vfcvt_f_x_v_f64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
+ return vfcvt_f_xu_v_f64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
+ return vfcvt_f_xu_v_f64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
+ return vfcvt_f_xu_v_f64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
+ return vfcvt_f_xu_v_f64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask,
+ vuint32mf2_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vint32mf2_t src, size_t vl) {
+ return vfcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vint32m1_t src, size_t vl) {
+ return vfcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vint32m2_t src, size_t vl) {
+ return vfcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vint32m4_t src, size_t vl) {
+ return vfcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vint32m8_t src, size_t vl) {
+ return vfcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vuint32mf2_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vuint32m1_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vuint32m2_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vuint32m4_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vuint32m8_t src, size_t vl) {
+ return vfcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_x_f_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vint64m1_t src, size_t vl) {
+ return vfcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vint64m2_t src, size_t vl) {
+ return vfcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vint64m4_t src, size_t vl) {
+ return vfcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_x_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vint64m8_t src, size_t vl) {
+ return vfcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vuint64m1_t src, size_t vl) {
+ return vfcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vuint64m2_t src, size_t vl) {
+ return vfcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vuint64m4_t src, size_t vl) {
+ return vfcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vuint64m8_t src, size_t vl) {
+ return vfcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
new file mode 100644
index 000000000000..bca8b4c63ed8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfdiv_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfdiv_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfdiv_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfdiv_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfdiv_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfdiv_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfdiv_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfdiv_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfdiv_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfdiv_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfdiv_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfdiv_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfdiv_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfdiv_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfdiv_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfdiv_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfdiv_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfdiv_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfdiv_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfdiv_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c
new file mode 100644
index 000000000000..e1201f2c8a19
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfirst.c
@@ -0,0 +1,196 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b1(vbool1_t op1, size_t vl) { return vfirst_m_b1(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b2(vbool2_t op1, size_t vl) { return vfirst_m_b2(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b4(vbool4_t op1, size_t vl) { return vfirst_m_b4(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b8(vbool8_t op1, size_t vl) { return vfirst_m_b8(op1, vl); }
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b16(vbool16_t op1, size_t vl) {
+ return vfirst_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b32(vbool32_t op1, size_t vl) {
+ return vfirst_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b64(vbool64_t op1, size_t vl) {
+ return vfirst_m_b64(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vfirst_m_b1_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vfirst_m_b2_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vfirst_m_b4_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vfirst_m_b8_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vfirst_m_b16_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vfirst_m_b32_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfirst_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vfirst.mask.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfirst_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vfirst.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long test_vfirst_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vfirst_m_b64_m(mask, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
new file mode 100644
index 000000000000..e0f83ffc9cb6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmacc_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmacc_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmacc_vv_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmacc_vf_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmacc_vv_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmacc_vf_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmacc_vv_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmacc_vf_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmacc_vv_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmacc_vf_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmacc_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmacc_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmacc_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmacc_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmacc_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmacc_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmacc_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmacc_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmacc_vv_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmacc_vf_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmacc_vv_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmacc_vf_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmacc_vv_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmacc_vf_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmacc_vv_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmacc_vf_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmacc_vv_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmacc_vf_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmacc_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfmacc_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmacc_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfmacc_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmacc_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
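The masked vfmacc tests above complete the pattern: each _m intrinsic lowers to the matching @llvm.riscv.vfmacc.mask.* intrinsic, with the mask operand inserted just before the VL operand and the VL width (i32 vs. i64) chosen by the target triple. A minimal caller sketch, assuming only the vfmacc_vf_f32m1_m(mask, acc, op1, op2, vl) signature exercised above (scaled_accumulate is a hypothetical name):

    #include <riscv_vector.h>

    // Sketch: acc[i] += scale * x[i] on active lanes; under the
    // mask-undisturbed behavior of these _m intrinsics, inactive
    // lanes keep the old acc value.
    vfloat32m1_t scaled_accumulate(vbool32_t mask, vfloat32m1_t acc,
                                   float scale, vfloat32m1_t x, size_t vl) {
      return vfmacc_vf_f32m1_m(mask, acc, scale, x, vl);
    }
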
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
new file mode 100644
index 000000000000..01ab8439e639
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmadd_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmadd_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmadd_vv_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmadd_vf_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmadd_vv_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmadd_vf_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmadd_vv_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmadd_vf_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmadd_vv_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmadd_vf_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmadd_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmadd_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmadd_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmadd_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmadd_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmadd_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmadd_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmadd_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmadd_vv_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmadd_vf_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmadd_vv_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmadd_vf_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmadd_vv_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmadd_vf_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmadd_vv_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmadd_vf_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmadd_vv_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmadd_vf_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmadd_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfmadd_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmadd_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfmadd_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmadd_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfmadd_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmadd_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmadd_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmadd_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
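vfmadd mirrors vfmacc but swaps operand roles: vfmacc computes op1 * op2 + acc, while vfmadd multiplies the accumulator itself, acc * op1 + op2 (the spec's vd = vd * vs1 + vs2). That makes the vf form a natural Horner's-rule step. A minimal sketch, assuming only the vfmadd_vf_f64m1(acc, op1, op2, vl) signature tested above (horner_step is a hypothetical name):

    #include <riscv_vector.h>

    // Sketch: one Horner step per lane, acc = acc * x + coeff.
    vfloat64m1_t horner_step(vfloat64m1_t acc, double x, vfloat64m1_t coeff,
                             size_t vl) {
      return vfmadd_vf_f64m1(acc, x, coeff, vl);
    }
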
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
new file mode 100644
index 000000000000..ee4cdb03d1c6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmax_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmax_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmax_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmax_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmax_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmax_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmax_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmax_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmax_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmax_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmax_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmax_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmax_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmax_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmax_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m8(op1, op2, vl);
+}
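+
+// Illustrative aside (not autogenerated): the vf form above doubles as a
+// per-lane lower clamp, max(v[i], lo), e.g.:
+//   vfloat32m8_t clamp_floor(vfloat32m8_t v, float lo, size_t vl) {
+//     return vfmax_vf_f32m8(v, lo, vl);
+//   }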
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmax_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmax_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmax_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmax_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmax_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmax_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmax_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmax_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmax_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmax_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmax_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmax.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmax.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmax_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmax.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmax_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmax.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmax_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmax.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmax_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmax_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmax.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmax_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmax.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmax_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmax.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmax_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmax_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmax.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmax_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
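
Annotation: the vfmax tests above exercise both forms of each intrinsic. The unmasked form lowers to llvm.riscv.vfmax.*, while the _m form adds a maskedoff vector and a mask operand and lowers to llvm.riscv.vfmax.mask.*. The following is a minimal usage sketch, not part of the checked-in test suite; it assumes the same <riscv_vector.h> intrinsics and the signature pattern shown in the tests (e.g. test_vfmax_vf_f32m1_m above).

#include <riscv_vector.h>

// Unmasked: every result element is max(v[i], floor_val).
vfloat32m1_t clamp_below(vfloat32m1_t v, float floor_val, size_t vl) {
  return vfmax_vf_f32m1(v, floor_val, vl);
}

// Masked: active elements become max(v[i], floor_val); inactive elements
// are taken from the maskedoff operand (here v itself, leaving them as-is).
vfloat32m1_t clamp_below_masked(vbool32_t mask, vfloat32m1_t v,
                                float floor_val, size_t vl) {
  return vfmax_vf_f32m1_m(mask, v, v, floor_val, vl);
}
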
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
new file mode 100644
index 000000000000..be96573d9c55
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c
@@ -0,0 +1,146 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
+ float op2, size_t vl) {
+ return vfmerge_vfm_f32mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
+ size_t vl) {
+ return vfmerge_vfm_f32m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
+ size_t vl) {
+ return vfmerge_vfm_f32m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
+ size_t vl) {
+ return vfmerge_vfm_f32m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
+ size_t vl) {
+ return vfmerge_vfm_f32m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1,
+ double op2, size_t vl) {
+ return vfmerge_vfm_f64m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1,
+ double op2, size_t vl) {
+ return vfmerge_vfm_f64m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1,
+ double op2, size_t vl) {
+ return vfmerge_vfm_f64m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2,
+ size_t vl) {
+ return vfmerge_vfm_f64m8(mask, op1, op2, vl);
+}
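
Annotation: unlike the vfmax/vfmin tests, the vfmerge tests above have no _m variants, because the mask is the selector itself. Per the RVV vfmerge semantics (not asserted by these IR checks, which only verify lowering to llvm.riscv.vfmerge.*), result[i] = mask[i] ? op2 : op1[i], with op2 a scalar. A hedged sketch using the f32m1 signature from the tests:

#include <riscv_vector.h>

// result[i] = mask[i] ? 0.0f : v[i], i.e. splat 0.0f into the selected lanes.
vfloat32m1_t zero_selected_lanes(vbool32_t mask, vfloat32m1_t v, size_t vl) {
  return vfmerge_vfm_f32m1(mask, v, 0.0f, vl);
}
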
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
new file mode 100644
index 000000000000..2230af151f28
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmin_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmin_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmin_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmin_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmin_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmin_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmin_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmin_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmin_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmin_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmin_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmin_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmin_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmin_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmin_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmin_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmin_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmin_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmin_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmin_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmin_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmin_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmin_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmin_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmin_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmin_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmin_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmin.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmin_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmin.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmin_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmin.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmin_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmin.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmin_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmin.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmin_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmin_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmin.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmin_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmin.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmin_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmin.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmin_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmin_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmin.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
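
The vfmin tests above follow the file's fixed pattern: each unmasked vv/vf form is paired with a masked _m form that adds a vbool mask (its ratio tracks SEW/LMUL, so vbool4_t for f32m8 and vbool64_t for f64m1) and a maskedoff vector supplying the result for inactive elements. As a minimal sketch of how the intrinsic under test is used outside the test harness, the loop below clamps an array to a scalar upper bound; it assumes the unmasked vfmin_vf_f32m8 exists alongside the masked form checked above, and the vsetvl_e32m8 / vle32_v_f32m8 / vse32_v_f32m8 helpers are taken on the same assumption from this pre-ratification generation of <riscv_vector.h>, not from this change.

#include <riscv_vector.h>
#include <stddef.h>

/* Sketch only, not part of the test suite: strip-mined elementwise min
   against a scalar bound. Intrinsic names assume the pre-ratification
   (non __riscv_-prefixed) API used by the tests in this diff. */
void clamp_upper(float *dst, const float *src, float bound, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m8(n - i);             /* elements this pass */
    vfloat32m8_t v = vle32_v_f32m8(src + i, vl); /* unit-stride load */
    v = vfmin_vf_f32m8(v, bound, vl);            /* v[j] = min(v[j], bound) */
    vse32_v_f32m8(dst + i, v, vl);
    i += vl;                                     /* advance by granted vl */
  }
}
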
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
new file mode 100644
index 000000000000..dcbab0cf914d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsac_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsac_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsac_vv_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmsac_vf_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmsac_vv_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmsac_vf_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmsac_vv_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmsac_vf_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmsac_vv_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmsac_vf_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmsac_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmsac_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmsac_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmsac_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmsac_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmsac_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsac_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsac_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmsac_vv_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmsac_vf_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmsac_vv_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsac_vf_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmsac_vv_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmsac_vf_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmsac_vv_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmsac_vf_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmsac_vv_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmsac_vf_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
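
Note the operand roles in the two FMA files: vfmsac overwrites the accumulator, computing acc[i] = op1[i] * op2[i] - acc[i], while the vfmsub file that follows overwrites the multiplicand, computing acc[i] = acc[i] * op1[i] - op2[i]. In the masked _m forms there is no separate maskedoff operand; acc doubles as the merge source, so inactive elements carry their existing acc value through. The sketch below is one plausible use of the masked form and is not part of this change; vmfgt_vf_f64m1_b64 and the load/store/vsetvl helpers are assumed from the same pre-ratification intrinsic generation.

#include <riscv_vector.h>
#include <stddef.h>

/* Sketch only: acc[i] = a[i]*b[i] - acc[i] where a[i] > 0, acc[i] kept
   elsewhere. Helper intrinsic names are assumptions, not part of the diff. */
void fmsac_where_positive(double *acc, const double *a, const double *b,
                          size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e64m1(n - i);
    vfloat64m1_t va = vle64_v_f64m1(a + i, vl);
    vfloat64m1_t vb = vle64_v_f64m1(b + i, vl);
    vfloat64m1_t vacc = vle64_v_f64m1(acc + i, vl);
    vbool64_t m = vmfgt_vf_f64m1_b64(va, 0.0, vl); /* active where a[i] > 0 */
    /* Masked multiply-subtract from the file above: under the mask,
       vacc = va*vb - vacc; masked-off lanes keep vacc unchanged. */
    vacc = vfmsac_vv_f64m1_m(m, vacc, va, vb, vl);
    vse64_v_f64m1(acc + i, vacc, vl);
    i += vl;
  }
}
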
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
new file mode 100644
index 000000000000..2c0e35c495df
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
@@ -0,0 +1,560 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsub_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfmsub_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsub_vv_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmsub_vf_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmsub_vv_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmsub_vf_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmsub_vv_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmsub_vf_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmsub_vv_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmsub_vf_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmsub_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfmsub_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmsub_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfmsub_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmsub_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfmsub_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsub_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsub_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmsub_vv_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfmsub_vf_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmsub_vv_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfmsub_vf_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmsub_vv_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfmsub_vf_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmsub_vv_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfmsub_vf_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmsub_vv_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfmsub_vf_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmsub_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfmsub_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmsub_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfmsub_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmsub_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfmsub_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmsub_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfmsub_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
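
Note on the vfmsub pattern checked above: per the V-extension spec, vfmsub overwrites its
accumulator operand, so the calls return acc * op1 - op2 with a single rounding, and the
masked (_m) form passes acc as the merge value, leaving inactive lanes holding acc's old
contents. A minimal usage sketch under the same pre-1.0 (+experimental-v) intrinsic naming
these tests use; scale_and_bias_down is a hypothetical helper, not part of this diff:

    #include <riscv_vector.h>

    // result[i] = acc[i] * scale[i] - bias[i], fused (one rounding step).
    static vfloat32m1_t scale_and_bias_down(vfloat32m1_t acc,
                                            vfloat32m1_t scale,
                                            vfloat32m1_t bias, size_t vl) {
      return vfmsub_vv_f32m1(acc, scale, bias, vl);
    }
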
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
new file mode 100644
index 000000000000..709f3e776a63
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmul_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmul_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmul_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmul_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmul_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmul_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmul_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmul_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmul_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmul_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmul_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmul_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmul_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmul_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmul_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmul_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmul_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmul_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmul_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmul_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmul_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmul_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmul_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmul.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmul.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmul.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmul.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmul.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmul.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmul.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfmul_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
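
The vfmul tests pair each vector-vector (vv) form with a vector-scalar (vf) form and a
masked (_m) form whose first value operand, maskedoff, supplies the result for inactive
lanes. A strip-mined caller sketch, assuming the same pre-1.0 intrinsic names
(vsetvl_e32m1, vle32_v_f32m1, vse32_v_f32m1) that riscv_vector.h provided alongside these
tests; mul_f32 is a hypothetical helper, not part of this diff:

    #include <riscv_vector.h>

    // c[i] = a[i] * b[i] for i < n, processing vl elements per pass.
    void mul_f32(const float *a, const float *b, float *c, size_t n) {
      for (size_t vl; n > 0; n -= vl, a += vl, b += vl, c += vl) {
        vl = vsetvl_e32m1(n);                    // lanes this iteration
        vfloat32m1_t va = vle32_v_f32m1(a, vl);  // unit-stride loads
        vfloat32m1_t vb = vle32_v_f32m1(b, vl);
        vse32_v_f32m1(c, vfmul_vv_f32m1(va, vb, vl), vl);
      }
    }
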
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
new file mode 100644
index 000000000000..3f89ca551ddd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
@@ -0,0 +1,1535 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
+ return vfncvt_x_f_w_i16mf4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16mf4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
+ return vfncvt_x_f_w_i16mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
+ return vfncvt_x_f_w_i16m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
+ return vfncvt_x_f_w_i16m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
+ return vfncvt_x_f_w_i16m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16mf4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16mf4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i32(<vscale x 16 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_x_f_w_i32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_x_f_w_i32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_x_f_w_i32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_x_f_w_i32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_xu_f_w_u32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_xu_f_w_u32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_xu_f_w_u32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_xu_f_w_u32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
+ return vfncvt_f_x_w_f32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
+ return vfncvt_f_x_w_f32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
+ return vfncvt_f_x_w_f32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
+ return vfncvt_f_x_w_f32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
+ return vfncvt_f_xu_w_f32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
+ return vfncvt_f_xu_w_f32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
+ return vfncvt_f_xu_w_f32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
+ return vfncvt_f_xu_w_f32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_f_f_w_f32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i32(<vscale x 1 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_f_f_w_f32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i32(<vscale x 2 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_f_f_w_f32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i32(<vscale x 4 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_f_f_w_f32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i32(<vscale x 8 x double> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfncvt_x_f_w_i16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask,
+ vint16mf4_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfncvt_x_f_w_i16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask,
+ vint16mf2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask,
+ vuint16mf4_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask,
+ vuint16mf2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,
+ vuint16m1_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vfloat32m8_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,
+ vint32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_x_f_w_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_rtz_x_f_w_i32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,
+ vuint32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask,
+ vuint32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask,
+ vuint32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_rtz_xu_f_w_u32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vint64m1_t src, size_t vl) {
+ return vfncvt_f_x_w_f32mf2_m(mask, maskedoff, src, vl);
+}
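+
+// Note: the f.x.w and f.xu.w tests that follow exercise the opposite
+// direction: they narrow a double-width signed (x) or unsigned (xu) integer
+// vector to a float vector of half the element width, rounding per frm.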
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vint64m2_t src, size_t vl) {
+ return vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vint64m4_t src, size_t vl) {
+ return vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_x_w_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vint64m8_t src, size_t vl) {
+ return vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vuint64m1_t src, size_t vl) {
+ return vfncvt_f_xu_w_f32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vuint64m2_t src, size_t vl) {
+ return vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vuint64m4_t src, size_t vl) {
+ return vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vuint64m8_t src, size_t vl) {
+ return vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_f_f_w_f32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat64m1_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32mf2_m(mask, maskedoff, src, vl);
+}
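+
+// Note: vfncvt.rod.f.f.w rounds toward odd ("jamming"): the truncated result
+// has its low bit forced to 1 whenever any discarded bits were nonzero. Per
+// the RVV spec this lets a two-step narrowing (e.g. f64 -> f32 -> f16) avoid
+// double-rounding error; the plain f.f.w form uses frm as usual.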
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask,
+ vfloat32m1_t maskedoff,
+ vfloat64m2_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask,
+ vfloat32m2_t maskedoff,
+ vfloat64m4_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_f_f_w_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
+ vfloat32m4_t maskedoff,
+ vfloat64m8_t src, size_t vl) {
+ return vfncvt_rod_f_f_w_f32m4_m(mask, maskedoff, src, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
new file mode 100644
index 000000000000..30407445af67
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
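+// Note: per the RISC-V V extension spec, vfnmacc negates the product and
+// subtracts the accumulator: vd[i] = -(op1[i] * op2[i]) - vd[i]. The _vv
+// forms take a vector op1, the _vf forms splat a scalar op1, and the _m
+// forms predicate the update with a mask register.
+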
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmacc_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmacc_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmacc_vv_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmacc_vf_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmacc_vv_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmacc_vf_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmacc_vv_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmacc_vf_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmacc_vv_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmacc_vf_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmacc_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmacc_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmacc_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmacc_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmacc_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmacc_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmacc_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmacc_vf_f64m8(acc, op1, op2, vl);
+}
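+
+// Note: in the masked (_m) tests below, the mask type encodes the ratio
+// SEW/LMUL: f32mf2 (SEW=32, LMUL=1/2) pairs with vbool64_t, f32m8 with
+// vbool4_t, and f64m8 with vbool8_t.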
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmacc_vf_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfnmacc_vf_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfnmacc_vf_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmacc_vf_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmacc_vf_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfnmacc_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfnmacc_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfnmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfnmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ double op1, vfloat64m8_t op2, size_t vl) {
+ return vfnmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
new file mode 100644
index 000000000000..79cd39d9c1d3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
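+// Note: vfnmadd differs from vfnmacc in which operand multiplies the
+// accumulator: per the RVV spec it computes vd[i] = -(vd[i] * op1[i]) - op2[i],
+// so the destination is a multiplicand rather than the addend.
+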
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmadd_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmadd_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmadd_vv_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmadd_vf_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmadd_vv_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmadd_vf_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmadd_vv_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmadd_vf_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmadd_vv_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmadd_vf_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmadd_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmadd_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmadd_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmadd_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmadd_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmadd_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmadd_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmadd_vf_f64m8(acc, op1, op2, vl);
+}
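+
+// The _m variants below take a leading mask whose vbool width matches the
+// SEW/LMUL ratio (e.g. vbool64_t for f32mf2, vbool4_t for f32m8); inactive
+// elements are expected to pass through from acc unchanged.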
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmadd_vf_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfnmadd_vf_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfnmadd_vf_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmadd_vf_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmadd_vf_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfnmadd_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfnmadd_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfnmadd_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfnmadd_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmadd_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ double op1, vfloat64m8_t op2, size_t vl) {
+ return vfnmadd_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
new file mode 100644
index 000000000000..259e60f48052
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
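+
+// The IR RUN lines above emit LLVM IR, run mem2reg to drop the O0 allocas, and
+// match it against the CHECK-RV32/CHECK-RV64 bodies; the assembly RUN line
+// only verifies that compiling these intrinsics emits no warnings (ASM-NOT
+// above).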
+#include <riscv_vector.h>
+
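+// Per the RISC-V V-extension spec, vfnmsac computes
+//   acc[i] = -(op1[i] * op2[i]) + acc[i]
+// (negate the product of the two multiplicands, then add the accumulator);
+// the _vf variants splat the scalar op1 across all elements.
+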
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmsac_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmsac_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmsac_vv_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmsac_vf_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmsac_vv_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmsac_vf_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsac_vv_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsac_vf_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsac_vv_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsac_vf_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmsac_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmsac_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmsac_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmsac_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmsac_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmsac_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmsac_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmsac_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmsac_vf_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfnmsac_vf_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfnmsac_vf_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsac_vf_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsac_vf_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfnmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfnmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfnmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfnmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ double op1, vfloat64m8_t op2, size_t vl) {
+ return vfnmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
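
For orientation before the next file: the masked vfnmsac tests above exercise the RVV negate-multiply-subtract-accumulator form, where each active element computes -(op1[i] * op2[i]) + acc[i] and masked-off elements keep the old accumulator value. A minimal scalar sketch of that semantics follows; it is not part of the test file, and the helper name `ref_vfnmsac_vv_m` and the `mask`/`vl` array model are illustrative assumptions, not the real intrinsic API.

#include <stddef.h> // size_t

// Hypothetical scalar model of the masked vfnmsac_vv semantics: active
// elements get -(op1*op2) + acc; inactive elements keep their old acc value.
static void ref_vfnmsac_vv_m(const _Bool *mask, double *acc,
                             const double *op1, const double *op2,
                             size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    if (mask[i])
      acc[i] = -(op1[i] * op2[i]) + acc[i];
}
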
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
new file mode 100644
index 000000000000..a77ee037e8bd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
@@ -0,0 +1,561 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmsub_vv_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfnmsub_vf_f32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmsub_vv_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfnmsub_vf_f32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmsub_vv_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfnmsub_vf_f32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsub_vv_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsub_vf_f32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsub_vv_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsub_vf_f32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmsub_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vfnmsub_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmsub_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vfnmsub_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmsub_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vfnmsub_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmsub_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vfnmsub_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
+ float op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfnmsub_vf_f32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfnmsub_vf_f32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfnmsub_vf_f32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfnmsub_vf_f32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vfnmsub_vf_f32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ double op1, vfloat64m1_t op2, size_t vl) {
+ return vfnmsub_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ double op1, vfloat64m2_t op2, size_t vl) {
+ return vfnmsub_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ double op1, vfloat64m4_t op2, size_t vl) {
+ return vfnmsub_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfnmsub_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfnmsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ double op1, vfloat64m8_t op2, size_t vl) {
+ return vfnmsub_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
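
vfnmsub, tested above, differs from vfnmsac only in which operand the product consumes: it multiplies the accumulator by op1 and adds op2, i.e. acc[i] = -(acc[i] * op1[i]) + op2[i]. A hedged scalar sketch of the unmasked vv form (the helper name and loop model are illustrative, not from the test file):

#include <stddef.h> // size_t

// Hypothetical scalar model of the unmasked vfnmsub_vv semantics: the
// product consumes the accumulator, and op2 is the addend.
static void ref_vfnmsub_vv(double *acc, const double *op1,
                           const double *op2, size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    acc[i] = -(acc[i] * op1[i]) + op2[i];
}
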
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
new file mode 100644
index 000000000000..e47ff2d52edf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrdiv_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrdiv_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrdiv_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrdiv_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrdiv_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrdiv_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfrdiv_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrdiv_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfrdiv_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfrdiv_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfrdiv_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfrdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfrdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfrdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfrdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrdiv_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfrdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
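
The vfrdiv tests above cover reverse division: the scalar operand is the dividend and each vector element the divisor, so vfrdiv_vf(op1, op2, vl) yields op2 / op1[i] per active element. A minimal scalar sketch under that reading of the RVV spec; the helper name `ref_vfrdiv_vf` and the `dst` output array are assumptions for illustration only.

#include <stddef.h> // size_t

// Hypothetical scalar model of vfrdiv_vf: the scalar op2 is the dividend and
// each element of op1 the divisor (operands reversed relative to vfdiv_vf).
static void ref_vfrdiv_vf(double *dst, const double *op1, double op2,
                          size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    dst[i] = op2 / op1[i];
}
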
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
new file mode 100644
index 000000000000..a46874f05ca8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
+ return vfrec7_v_f32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
+ return vfrec7_v_f32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
+ return vfrec7_v_f32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
+ return vfrec7_v_f32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
+ return vfrec7_v_f32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
+ return vfrec7_v_f64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
+ return vfrec7_v_f64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
+ return vfrec7_v_f64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
+ return vfrec7_v_f64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t vl) {
+ return vfrec7_v_f32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, size_t vl) {
+ return vfrec7_v_f32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, size_t vl) {
+ return vfrec7_v_f32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, size_t vl) {
+ return vfrec7_v_f32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, size_t vl) {
+ return vfrec7_v_f32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, size_t vl) {
+ return vfrec7_v_f64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, size_t vl) {
+ return vfrec7_v_f64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, size_t vl) {
+ return vfrec7_v_f64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, size_t vl) {
+ return vfrec7_v_f64m8_m(mask, maskedoff, op1, vl);
+}
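The vfrec7 tests above cover every LMUL and element width for the 7-bit reciprocal estimate. As a rough illustration of how it might be used, the sketch below (illustrative name and loop, same pre-ratification intrinsic naming as the tests) fills dst with approximate reciprocals accurate to about 2^-7 relative error.

#include <riscv_vector.h>
#include <stddef.h>

// Sketch only: dst[i] = approximate 1.0f / src[i] via the vfrec7 estimate.
void reciprocal_estimate_f32(float *dst, const float *src, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m8(n - i);
    vfloat32m8_t v = vle32_v_f32m8(src + i, vl);
    v = vfrec7_v_f32m8(v, vl);      // ~7 bits of precision; refine with
                                    // Newton-Raphson steps if full accuracy
                                    // is required
    vse32_v_f32m8(dst + i, v, vl);
    i += vl;
  }
}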
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
new file mode 100644
index 000000000000..13ab0a1588cd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m1_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m4_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m8_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmax_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmax_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
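The reduction tests take a destination, the vector being reduced, and an LMUL=1 "scalar" operand whose element 0 seeds the reduction. A hedged sketch of the resulting usage pattern follows (illustrative function name; intrinsic names match these pre-ratification tests and may differ in later toolchains).

#include <riscv_vector.h>
#include <stddef.h>
#include <math.h>

// Sketch only: maximum of src[0..n) via vfredmax, seeded with -INFINITY so
// short or empty inputs cannot win the reduction.
float vec_max_f32(const float *src, size_t n) {
  vfloat32m1_t acc = vfmv_v_f_f32m1(-INFINITY, vsetvlmax_e32m1());
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m8(n - i);
    vfloat32m8_t v = vle32_v_f32m8(src + i, vl);
    // dst and scalar stay at LMUL=1 even though the vector operand is m8.
    acc = vfredmax_vs_f32m8_f32m1(acc, v, acc, vl);
    i += vl;
  }
  return vfmv_f_s_f32m1_f32(acc);  // element 0 holds the running maximum
}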
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
new file mode 100644
index 000000000000..6e4b0b114946
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32m1_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32m2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32m4_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32m8_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin_vs_f64m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin_vs_f64m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin_vs_f64m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin_vs_f64m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredmin_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredmin_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
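The _m variants above prepend a mask so inactive lanes are ignored by the reduction. One plausible use is sketched below with the same caveats; the vmfgt mask construction is an illustrative assumption, not something this patch tests.

#include <riscv_vector.h>
#include <stddef.h>
#include <math.h>

// Sketch only: minimum over strictly positive elements; masked-off lanes do
// not participate in the vfredmin reduction.
float vec_min_positive_f32(const float *src, size_t n) {
  vfloat32m1_t acc = vfmv_v_f_f32m1(INFINITY, vsetvlmax_e32m1());
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m8(n - i);
    vfloat32m8_t v = vle32_v_f32m8(src + i, vl);
    vbool4_t m = vmfgt_vf_f32m8_b4(v, 0.0f, vl);  // lanes with src > 0
    acc = vfredmin_vs_f32m8_f32m1_m(m, acc, v, acc, vl);
    i += vl;
  }
  return vfmv_f_s_f32m1_f32(acc);
}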
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
new file mode 100644
index 000000000000..190a6e31c8d0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
@@ -0,0 +1,579 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32m1_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32m2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32m4_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32m8_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum_vs_f64m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum_vs_f64m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum_vs_f64m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum_vs_f64m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredsum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32m1_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32m2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32m4_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32m8_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum_vs_f64m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum_vs_f64m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum_vs_f64m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum_vs_f64m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+ vfloat32m2_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+ vfloat32m4_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+ vfloat32m8_t vector,
+ vfloat32m1_t scalar, size_t vl) {
+ return vfredosum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat64m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat64m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat64m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
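
The unordered (vfredsum) and ordered (vfredosum) reductions tested above all fold a whole vector register group into element 0 of an m1 result, seeded by element 0 of the scalar operand; the dst operand supplies the untouched elements. A minimal strip-mined array sum built on the same API is sketched below. The helper intrinsics it leans on (vsetvl_e32m8, vle32_v_f32m8, vfmv_v_f_f32m1, vfmv_f_s_f32m1_f32) are assumed to follow the same experimental-v era naming as the tests in this patch; treat it as a sketch, not part of the patch.

#include <riscv_vector.h>
#include <stddef.h>

float sum_f32(const float *a, size_t n) {
  // Running sum lives in element 0 of an m1 register; assumed intrinsic name.
  vfloat32m1_t acc = vfmv_v_f_f32m1(0.0f, 1);
  while (n > 0) {
    size_t vl = vsetvl_e32m8(n);                     // elements handled this trip
    vfloat32m8_t v = vle32_v_f32m8(a, vl);           // load one m8 chunk
    // acc[0] += unordered sum of v[0..vl-1]; acc doubles as dst and scalar seed.
    acc = vfredsum_vs_f32m8_f32m1(acc, v, acc, vl);
    a += vl;
    n -= vl;
  }
  return vfmv_f_s_f32m1_f32(acc);                    // extract element 0
}

Swapping vfredsum for vfredosum in the loop body trades speed for a reproducible, strictly element-order summation, which is the only difference the two intrinsic families expose.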
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
new file mode 100644
index 000000000000..486627504b69
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
+ return vfrsqrt7_v_f32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
+ return vfrsqrt7_v_f32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
+ return vfrsqrt7_v_f32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
+ return vfrsqrt7_v_f32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
+ return vfrsqrt7_v_f32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
+ return vfrsqrt7_v_f64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
+ return vfrsqrt7_v_f64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
+ return vfrsqrt7_v_f64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
+ return vfrsqrt7_v_f64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t vl) {
+ return vfrsqrt7_v_f32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, size_t vl) {
+ return vfrsqrt7_v_f32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, size_t vl) {
+ return vfrsqrt7_v_f32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, size_t vl) {
+ return vfrsqrt7_v_f32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, size_t vl) {
+ return vfrsqrt7_v_f32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, size_t vl) {
+ return vfrsqrt7_v_f64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, size_t vl) {
+ return vfrsqrt7_v_f64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, size_t vl) {
+ return vfrsqrt7_v_f64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, size_t vl) {
+ return vfrsqrt7_v_f64m8_m(mask, maskedoff, op1, vl);
+}
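
vfrsqrt7 returns a reciprocal square-root estimate accurate to roughly 7 mantissa bits, so callers needing full precision typically refine it. The sketch below applies one Newton-Raphson step, y' = y * (1.5 - 0.5 * x * y * y), on top of the intrinsic exercised above; the vfmul and vfrsub names are assumed to match the same era's API as the rest of this patch.

vfloat32m1_t rsqrt_refined(vfloat32m1_t x, size_t vl) {
  vfloat32m1_t y  = vfrsqrt7_v_f32m1(x, vl);          // ~7-bit 1/sqrt(x) estimate
  vfloat32m1_t y2 = vfmul_vv_f32m1(y, y, vl);         // y*y
  vfloat32m1_t t  = vfmul_vv_f32m1(x, y2, vl);        // x*y*y
  t = vfmul_vf_f32m1(t, 0.5f, vl);                    // 0.5*x*y*y
  vfloat32m1_t h = vfrsub_vf_f32m1(t, 1.5f, vl);      // 1.5 - 0.5*x*y*y
  return vfmul_vv_f32m1(y, h, vl);                    // refined estimate
}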
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
new file mode 100644
index 000000000000..d2a29ceab1a4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfrsub_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfrsub_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfrsub_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfrsub_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfrsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfrsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
new file mode 100644
index 000000000000..a135be3af2d0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c
@@ -0,0 +1,1633 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnj_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnj_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnj_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnj_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnj_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnj_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnj_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnj_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnj_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnj_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnj_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnj_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnj_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnj_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnj_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnj_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnj_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnj_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnj_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnj_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnj_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjn_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjn_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjn_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjn_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjn_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjn_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjn_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjn_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjn_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjn_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjn_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjn_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnjn_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjn_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjn_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnjn_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjn_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjn_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnjn_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjn_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnjn_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnjx_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnjx_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnjx_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnjx_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnj_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnj_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnj_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnj_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnj_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnj_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnj_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2,
+ size_t vl) {
+ return vfsgnjn_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnjn_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnjn_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnjn_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnjn_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnjn_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjn_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnjn_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2,
+ size_t vl) {
+ return vfsgnjx_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsgnjx_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsgnjx_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsgnjx_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsgnjx_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsgnjx_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsgnjx_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsgnjx_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
new file mode 100644
index 000000000000..d7a857fcf715
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -0,0 +1,296 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i32(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfslide1down_vf_f32mf2(vfloat32mf2_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i32(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfslide1down_vf_f32m1(vfloat32m1_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i32(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfslide1down_vf_f32m2(vfloat32m2_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i32(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfslide1down_vf_f32m4(vfloat32m4_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i32(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfslide1down_vf_f32m8(vfloat32m8_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i32(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfslide1down_vf_f64m1(vfloat64m1_t src, double value,
+ size_t vl) {
+ return vfslide1down_vf_f64m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i32(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfslide1down_vf_f64m2(vfloat64m2_t src, double value,
+ size_t vl) {
+ return vfslide1down_vf_f64m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i32(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfslide1down_vf_f64m4(vfloat64m4_t src, double value,
+ size_t vl) {
+ return vfslide1down_vf_f64m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i32(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value,
+ size_t vl) {
+ return vfslide1down_vf_f64m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat32mf2_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask,
+ vfloat32m1_t maskedoff,
+ vfloat32m1_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask,
+ vfloat32m2_t maskedoff,
+ vfloat32m2_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t src, float value,
+ size_t vl) {
+ return vfslide1down_vf_f32m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask,
+ vfloat64m1_t maskedoff,
+ vfloat64m1_t src, double value,
+ size_t vl) {
+ return vfslide1down_vf_f64m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask,
+ vfloat64m2_t maskedoff,
+ vfloat64m2_t src, double value,
+ size_t vl) {
+ return vfslide1down_vf_f64m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask,
+ vfloat64m4_t maskedoff,
+ vfloat64m4_t src, double value,
+ size_t vl) {
+ return vfslide1down_vf_f64m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1down_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t src, double value,
+ size_t vl) {
+ return vfslide1down_vf_f64m8_m(mask, maskedoff, src, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
new file mode 100644
index 000000000000..34d7cf31b74a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
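+
+// vfslide1up.vf slides each element of `src` up by one position and inserts
+// the scalar `value` at element 0. A rough scalar model of the unmasked form
+// (the exact semantics are defined by the RVV specification):
+//
+//   dst[0] = value;
+//   for (size_t i = 1; i < vl; ++i)
+//     dst[i] = src[i - 1];
+//
+// Each test below checks that the C intrinsic lowers to the matching
+// llvm.riscv.vfslide1up[.mask] intrinsic for every floating-point LMUL.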
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i32(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32.i64(<vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfslide1up_vf_f32mf2(vfloat32mf2_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i32(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32.i64(<vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfslide1up_vf_f32m1(vfloat32m1_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i32(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32.i64(<vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfslide1up_vf_f32m2(vfloat32m2_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i32(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32.i64(<vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfslide1up_vf_f32m4(vfloat32m4_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i32(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32.i64(<vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfslide1up_vf_f32m8(vfloat32m8_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i32(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64.i64(<vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfslide1up_vf_f64m1(vfloat64m1_t src, double value,
+ size_t vl) {
+ return vfslide1up_vf_f64m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i32(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64.i64(<vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfslide1up_vf_f64m2(vfloat64m2_t src, double value,
+ size_t vl) {
+ return vfslide1up_vf_f64m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i32(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64.i64(<vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value,
+ size_t vl) {
+ return vfslide1up_vf_f64m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i32(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64.i64(<vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value,
+ size_t vl) {
+ return vfslide1up_vf_f64m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat32mf2_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], float [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t src, float value,
+ size_t vl) {
+ return vfslide1up_vf_f32m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t src, double value,
+ size_t vl) {
+ return vfslide1up_vf_f64m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t src, double value,
+ size_t vl) {
+ return vfslide1up_vf_f64m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t src, double value,
+ size_t vl) {
+ return vfslide1up_vf_f64m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfslide1up_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], double [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t src, double value,
+ size_t vl) {
+ return vfslide1up_vf_f64m8_m(mask, maskedoff, src, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
new file mode 100644
index 000000000000..f87c1e9a2536
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
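+
+// vfsqrt.v computes an element-wise square root: dst[i] = sqrt(op1[i]) for
+// i < vl. In the masked "_m" variants, elements whose mask bit is clear are
+// taken from `maskedoff` instead. The tests check lowering to
+// llvm.riscv.vfsqrt[.mask] across all floating-point LMULs on both RV32 and
+// RV64; the trailing .i32/.i64 in the intrinsic name is the XLEN type of
+// the VL operand.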
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
+ return vfsqrt_v_f32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
+ return vfsqrt_v_f32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
+ return vfsqrt_v_f32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
+ return vfsqrt_v_f32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
+ return vfsqrt_v_f32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
+ return vfsqrt_v_f64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
+ return vfsqrt_v_f64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
+ return vfsqrt_v_f64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
+ return vfsqrt_v_f64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t vl) {
+ return vfsqrt_v_f32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, size_t vl) {
+ return vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, size_t vl) {
+ return vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, size_t vl) {
+ return vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, size_t vl) {
+ return vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, size_t vl) {
+ return vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, size_t vl) {
+ return vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, size_t vl) {
+ return vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, size_t vl) {
+ return vfsqrt_v_f64m8_m(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
new file mode 100644
index 000000000000..dc8b198db79a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -0,0 +1,551 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
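+
+// vfsub has two forms: vector-vector (vfsub.vv, dst[i] = op1[i] - op2[i])
+// and vector-scalar (vfsub.vf, dst[i] = op1[i] - op2 with a scalar float
+// operand). A rough scalar model of the unmasked vf form:
+//
+//   for (size_t i = 0; i < vl; ++i)
+//     dst[i] = op1[i] - op2;
+//
+// The checks verify that each overload lowers to the corresponding
+// llvm.riscv.vfsub[.mask] intrinsic with the expected element type and LMUL.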
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsub_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsub_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsub_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsub_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsub_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsub_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsub_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsub_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsub_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsub_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsub_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsub_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsub_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsub_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsub_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsub_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsub_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsub_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsub_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsub_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsub_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsub_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsub_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsub_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsub_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vfsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vfsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vfsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vfsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vfsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vfsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vfsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vfsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vfsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vfsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
new file mode 100644
index 000000000000..4f5ec65bd37d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c
@@ -0,0 +1,491 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwadd_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwadd_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwadd_wv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i32(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
+ return vfwadd_wf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwadd_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwadd_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwadd_wv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i32(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
+ return vfwadd_wf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwadd_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwadd_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwadd_wv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i32(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
+ return vfwadd_wf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwadd_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwadd_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwadd_wv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i32(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwadd_wf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwadd_wv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, float op2, size_t vl) {
+ return vfwadd_wf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwadd_wv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, float op2, size_t vl) {
+ return vfwadd_wf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwadd_wv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, float op2, size_t vl) {
+ return vfwadd_wf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwadd_wv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwadd_wf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwadd_wf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
new file mode 100644
index 000000000000..ae01b1e7f34b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
@@ -0,0 +1,1118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32mf2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_x_f_v_i64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_rtz_x_f_v_i64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_x_f_v_i64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_rtz_x_f_v_i64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_x_f_v_i64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_rtz_x_f_v_i64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_x_f_v_i64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_rtz_x_f_v_i64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_xu_f_v_u64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_rtz_xu_f_v_u64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_xu_f_v_u64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_rtz_xu_f_v_u64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_xu_f_v_u64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_rtz_xu_f_v_u64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_xu_f_v_u64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_rtz_xu_f_v_u64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
+ return vfwcvt_f_x_v_f64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
+ return vfwcvt_f_x_v_f64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
+ return vfwcvt_f_x_v_f64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
+ return vfwcvt_f_x_v_f64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i32(<vscale x 1 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_f_f_v_f64m1(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i32(<vscale x 2 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
+ return vfwcvt_f_f_v_f64m2(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i32(<vscale x 4 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
+ return vfwcvt_f_f_v_f64m4(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i32(<vscale x 8 x float> [[SRC:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
+ return vfwcvt_f_f_v_f64m8(src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vint16mf4_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vint16mf2_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vint16m1_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vint16m2_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vint16m4_t src, size_t vl) {
+ return vfwcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vuint16mf4_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vuint16mf2_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vuint16m1_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vuint16m2_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vuint16m4_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_x_f_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask,
+ vuint64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask,
+ vuint64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask,
+ vuint64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_xu_f_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vint32mf2_t src, size_t vl) {
+ return vfwcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vint32m1_t src, size_t vl) {
+ return vfwcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vint32m2_t src, size_t vl) {
+ return vfwcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_x_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vint32m4_t src, size_t vl) {
+ return vfwcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vuint32mf2_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vuint32m1_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vuint32m2_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_xu_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vuint32m4_t src, size_t vl) {
+ return vfwcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t src, size_t vl) {
+ return vfwcvt_f_f_v_f64m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t src, size_t vl) {
+ return vfwcvt_f_f_v_f64m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t src, size_t vl) {
+ return vfwcvt_f_f_v_f64m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwcvt_f_f_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t src, size_t vl) {
+ return vfwcvt_f_f_v_f64m8_m(mask, maskedoff, src, vl);
+}
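
The unmasked and masked vfwcvt tests above exercise every LMUL pairing of the widening conversions: a SEW-wide source (f32 or i32/u32 here) yields a 2*SEW result, so the destination register group uses twice the LMUL of the source. A minimal usage sketch of the float-to-double case follows, assuming the v0.10 intrinsic names these tests target; the helper name and buffers are illustrative, not part of the test suite:

#include <riscv_vector.h>
#include <stddef.h>

// Widen a float buffer to double with vfwcvt.f.f.v, strip-mined via vsetvl.
// Sketch only: widen_f32_to_f64 and its arguments are assumptions.
void widen_f32_to_f64(const float *src, double *dst, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);              // elements handled this pass
    vfloat32m1_t v = vle32_v_f32m1(src + i, vl);  // load 32-bit floats (LMUL=1)
    vfloat64m2_t w = vfwcvt_f_f_v_f64m2(v, vl);   // widen: result occupies LMUL=2
    vse64_v_f64m2(dst + i, w, vl);                // store 64-bit results
    i += vl;
  }
}

The f32m1-to-f64m2 pairing matches test_vfwcvt_f_f_v_f64m2 above; the masked _m variants additionally thread a maskedoff operand through to inactive lanes.
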
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
new file mode 100644
index 000000000000..0693c164e3f1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwmacc_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwmacc_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwmacc_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwmacc_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwmacc_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwmacc_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmacc_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmacc_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwmacc_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfwmacc_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwmacc_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfwmacc_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwmacc_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
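
[Editorial note, not part of the patch: the vfwmacc tests ending above and the vfwmsac, vfwnmacc, and vfwnmsac files that follow exercise four widening fused multiply-add families that differ only in sign placement. As a reading aid, stated from the V-extension's single-width FMA definitions rather than from this diff, with vd the double-width accumulator:

    // vfwmacc:  vd =  (op1 * op2) + vd
    // vfwnmacc: vd = -(op1 * op2) - vd
    // vfwmsac:  vd =  (op1 * op2) - vd
    // vfwnmsac: vd = -(op1 * op2) + vd
]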
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
new file mode 100644
index 000000000000..cec278eb5ee9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwmsac_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwmsac_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwmsac_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwmsac_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwmsac_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwmsac_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmsac_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmsac_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfwmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfwmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
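
[Editorial note, not part of the patch: a minimal usage sketch of the intrinsic just tested, showing the strip-mined loop shape these signatures are designed for. The vsetvl/vle/vse helpers named below are assumed to exist in this revision of riscv_vector.h alongside vfwmsac_vv_f64m1; they are assumptions, not taken from this diff. The loop folds 32-bit products into a 64-bit accumulator, computing acc[i] = a[i]*b[i] - acc[i] elementwise, which is why the e32 elements use the mf2 (half-register) type while the e64 accumulator uses m1: both give the same VLMAX, so one vl serves both widths.

    #include <riscv_vector.h>
    #include <stddef.h>

    void widening_msac(double *acc, const float *a, const float *b, size_t n) {
      for (size_t i = 0; i < n;) {
        size_t vl = vsetvl_e32mf2(n - i);              // elements this iteration
        vfloat32mf2_t va = vle32_v_f32mf2(&a[i], vl);  // narrow (f32) operands
        vfloat32mf2_t vb = vle32_v_f32mf2(&b[i], vl);
        vfloat64m1_t vacc = vle64_v_f64m1(&acc[i], vl);
        vacc = vfwmsac_vv_f64m1(vacc, va, vb, vl);     // vacc = va*vb - vacc, widened
        vse64_v_f64m1(&acc[i], vacc, vl);
        i += vl;
      }
    }
]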
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
new file mode 100644
index 000000000000..b40af3e22f45
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
@@ -0,0 +1,251 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmul_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwmul_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmul_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmul_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwmul_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmul_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmul_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwmul_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmul_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwmul_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwmul_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
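
[Editorial note, not part of the patch: one detail visible in the checks above is that the masked vfwmul variants carry a separate maskedoff vector supplying result elements where the mask bit is clear, whereas the masked FMA tests reuse acc for that role. A hedged sketch of the merge behavior follows; vmfgt_vf_f32mf2_b64 and the vsetvl/load/store intrinsics are assumed names from the same header, not confirmed by this diff. Lanes where a[i] fails the compare pass through unchanged from prev.

    #include <riscv_vector.h>
    #include <stddef.h>

    void masked_widen_scale(double *out, const float *a, float s, size_t n) {
      for (size_t i = 0; i < n;) {
        size_t vl = vsetvl_e32mf2(n - i);
        vfloat32mf2_t va = vle32_v_f32mf2(&a[i], vl);
        vbool64_t m = vmfgt_vf_f32mf2_b64(va, 0.0f, vl);   // mask: a[i] > 0
        vfloat64m1_t prev = vle64_v_f64m1(&out[i], vl);    // merge source
        vfloat64m1_t r = vfwmul_vf_f64m1_m(m, prev, va, s, vl);
        vse64_v_f64m1(&out[i], r, vl);                     // unmasked lanes keep prev
        i += vl;
      }
    }
]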
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
new file mode 100644
index 000000000000..4d15978b6d12
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmacc_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmacc_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwnmacc_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwnmacc_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwnmacc_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwnmacc_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwnmacc_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwnmacc_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmacc_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ float op1, vfloat32m4_t op2, size_t vl) {
+ return vfwnmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
new file mode 100644
index 000000000000..44bd012b1726
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
@@ -0,0 +1,255 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmsac_vv_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmsac_vv_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwnmsac_vv_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwnmsac_vv_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwnmsac_vv_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i32(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
+ float op1, vfloat32mf2_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwnmsac_vv_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i32(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
+ float op1, vfloat32m1_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwnmsac_vv_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i32(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
+ float op1, vfloat32m2_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwnmsac_vv_f64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwnmsac_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i32(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
+ float op1, vfloat32m4_t op2, size_t vl) {
+ return vfwnmsac_vf_f64m8_m(mask, acc, op1, op2, vl);
+}
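
Taken together, the vfwnmsac tests above pin down the widening negated multiply-sub-accumulate contract: vd[i] = -(op1[i] * op2[i]) + vd[i], with single-precision sources and a double-precision accumulator. The strip-mined loop below is a minimal usage sketch, not part of the patch; the array names, loop shape, and the companion vsetvl/vle/vse helpers are assumptions based on the same non-overloaded API these tests target.

#include <riscv_vector.h>

// Sketch (illustrative, not from the patch): acc[i] = -(a[i] * b[i]) + acc[i]
// over n elements, widening f32 products into the f64 accumulator exactly as
// test_vfwnmsac_vv_f64m8 above does (f32m4 sources, f64m8 accumulator).
void fwnmsac_f32_to_f64(double *acc, const float *a, const float *b,
                        size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m4(n - i);              // elements this pass
    vfloat32m4_t va = vle32_v_f32m4(a + i, vl);   // narrow sources
    vfloat32m4_t vb = vle32_v_f32m4(b + i, vl);
    vfloat64m8_t vacc = vle64_v_f64m8(acc + i, vl);
    vacc = vfwnmsac_vv_f64m8(vacc, va, vb, vl);   // vacc = -(va*vb) + vacc
    vse64_v_f64m8(acc + i, vacc, vl);
    i += vl;
  }
}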
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
new file mode 100644
index 000000000000..c010bcf608a8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
@@ -0,0 +1,171 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
+ vfloat32m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat32m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredosum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
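
The vfwredosum tests above also document the reduction calling convention: the running sum travels in element 0 of the f64m1 scalar operand, and the result lands in element 0 of the returned f64m1 value. What follows is a minimal sketch of an ordered widening sum of n floats; it is not part of the patch, and the vsetvlmax/vfmv helper spellings are assumptions drawn from the same-era non-overloaded API.

#include <riscv_vector.h>

// Sketch (illustrative, not from the patch): ordered, in-element-order sum of
// n floats, accumulated in double precision via element 0 of an f64m1 value.
double fwredosum_f32_to_f64(const float *a, size_t n) {
  size_t vlmax = vsetvlmax_e64m1();
  vfloat64m1_t vsum = vfmv_v_f_f64m1(0.0, vlmax); // running sum in element 0
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m2(n - i);
    vfloat32m2_t va = vle32_v_f32m2(a + i, vl);
    // dst and scalar may alias: element 0 of vsum carries the sum forward.
    vsum = vfwredosum_vs_f32m2_f64m1(vsum, va, vsum, vl);
    i += vl;
  }
  return vfmv_f_s_f64m1_f64(vsum);                // extract element 0
}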
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
new file mode 100644
index 000000000000..023ecd72ccf4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
@@ -0,0 +1,171 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
+ vfloat32m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat32mf2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+ vfloat32m1_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+ vfloat32m2_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+ vfloat32m4_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+ vfloat32m8_t vector,
+ vfloat64m1_t scalar, size_t vl) {
+ return vfwredsum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
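
vfwredsum shares vfwredosum's signature but leaves the association order of the partial sums unspecified, which typically permits a faster reduction tree at the cost of bit-exact reproducibility. In the ordered-sum sketch after the previous file, switching to the unordered form is a one-line change (again an illustrative assumption, not part of the patch):

// Unordered variant: same operands, implementation-defined summation order.
vsum = vfwredsum_vs_f32m2_f64m1(vsum, va, vsum, vl);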
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
new file mode 100644
index 000000000000..9d75dc9425f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
@@ -0,0 +1,491 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vv_f64m1(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vf_f64m1(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wv_f64m1(vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i32(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32.i64(<vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wf_f64m1(vfloat64m1_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_vv_f64m2(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_vf_f64m2(vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i32(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32.i64(<vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i32(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32.i64(<vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i32(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32.i64(<vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_vf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vfwsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vfwsub_wv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwsub_wf_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, float op2, size_t vl) {
+ return vfwsub_wf_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
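
The vfwsub tests cover two operand shapes: the vv/vf forms widen both inputs before subtracting, while the wv/wf forms take an already-wide first operand, letting chained wide arithmetic avoid re-widening an intermediate. A minimal sketch combining both follows; it is not part of the patch, and the array names and loop shape are illustrative assumptions.

#include <riscv_vector.h>

// Sketch (illustrative, not from the patch):
// d[i] = ((double)a[i] - (double)b[i]) - (double)c[i],
// using vv to widen-and-subtract, then wv on the wide intermediate.
void fwsub_chain(double *d, const float *a, const float *b,
                 const float *c, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);
    vfloat32m1_t va = vle32_v_f32m1(a + i, vl);
    vfloat32m1_t vb = vle32_v_f32m1(b + i, vl);
    vfloat32m1_t vc = vle32_v_f32m1(c + i, vl);
    vfloat64m2_t vd = vfwsub_vv_f64m2(va, vb, vl); // widen, then a - b
    vd = vfwsub_wv_f64m2(vd, vc, vl);              // wide - narrow
    vse64_v_f64m2(d + i, vd, vl);
    i += vl;
  }
}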
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
new file mode 100644
index 000000000000..314abf18476b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
@@ -0,0 +1,598 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
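+
+// vid.v writes each element's index (0, 1, 2, ...) into the destination
+// vector (per the RVV spec); the masked (_m) variants write only active
+// elements, with inactive elements taken from maskedoff.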
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vid_v_u8m1(size_t vl) { return vid_v_u8m1(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vid_v_u64m8(size_t vl) { return vid_v_u64m8(vl); }
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vid_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ size_t vl) {
+ return vid_v_u8mf8_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vid_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ size_t vl) {
+ return vid_v_u8mf4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vid_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ size_t vl) {
+ return vid_v_u8mf2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) {
+ return vid_v_u8m1_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) {
+ return vid_v_u8m2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) {
+ return vid_v_u8m4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) {
+ return vid_v_u8m8_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vid_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ size_t vl) {
+ return vid_v_u16mf4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vid_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ size_t vl) {
+ return vid_v_u16mf2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ size_t vl) {
+ return vid_v_u16m1_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ size_t vl) {
+ return vid_v_u16m2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ size_t vl) {
+ return vid_v_u16m4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ size_t vl) {
+ return vid_v_u16m8_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ size_t vl) {
+ return vid_v_u32mf2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ size_t vl) {
+ return vid_v_u32m1_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ size_t vl) {
+ return vid_v_u32m2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ size_t vl) {
+ return vid_v_u32m4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ size_t vl) {
+ return vid_v_u32m8_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ size_t vl) {
+ return vid_v_u64m1_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ size_t vl) {
+ return vid_v_u64m2_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ size_t vl) {
+ return vid_v_u64m4_m(mask, maskedoff, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vid_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vid_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ size_t vl) {
+ return vid_v_u64m8_m(mask, maskedoff, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
new file mode 100644
index 000000000000..dd7c7cd17b50
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
@@ -0,0 +1,646 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
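+
+// viota.m writes to each destination element the count of set bits among
+// the preceding elements of the source mask, i.e. an exclusive prefix sum
+// (per the RVV spec); the masked (_m) variants write only active elements,
+// with inactive elements taken from maskedoff.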
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
+ return viota_m_u8mf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
+ return viota_m_u8mf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
+ return viota_m_u8mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
+ return viota_m_u8m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
+ return viota_m_u8m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
+ return viota_m_u8m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
+ return viota_m_u8m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
+ return viota_m_u16mf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
+ return viota_m_u16mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
+ return viota_m_u16m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
+ return viota_m_u16m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
+ return viota_m_u16m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
+ return viota_m_u16m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
+ return viota_m_u32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
+ return viota_m_u32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
+ return viota_m_u32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
+ return viota_m_u32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
+ return viota_m_u32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
+ return viota_m_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
+ return viota_m_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
+ return viota_m_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) {
+ return viota_m_u64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota_m_u8mf8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota_m_u8mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota_m_u8mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota_m_u8m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota_m_u8m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vbool2_t op1, size_t vl) {
+ return viota_m_u8m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vbool1_t op1, size_t vl) {
+ return viota_m_u8m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota_m_u16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota_m_u16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota_m_u16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota_m_u16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota_m_u16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vbool2_t op1, size_t vl) {
+ return viota_m_u16m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota_m_u32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota_m_u32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota_m_u32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota_m_u32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vbool4_t op1, size_t vl) {
+ return viota_m_u32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vbool64_t op1, size_t vl) {
+ return viota_m_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vbool32_t op1, size_t vl) {
+ return viota_m_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vbool16_t op1, size_t vl) {
+ return viota_m_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_viota_m_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vbool8_t op1, size_t vl) {
+ return viota_m_u64m8_m(mask, maskedoff, op1, vl);
+}
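
Editorial note (not part of the patch): the tests above exercise the masked viota.m intrinsic. viota.m writes to each destination element the count of set bits in op1 among the preceding elements (an exclusive prefix count). When masked, only active elements are counted and written; inactive elements keep the maskedoff value. A minimal scalar reference model of that semantics, with op1 and mask expanded to boolean arrays for clarity:

#include <stddef.h>
#include <stdint.h>

/* Scalar model of masked viota.m: for active elements,
 * dst[i] = number of active set bits of op1 in elements 0..i-1;
 * inactive elements keep maskedoff[i]. */
static void viota_m_model(uint16_t *dst, const uint16_t *maskedoff,
                          const _Bool *op1, const _Bool *mask, size_t vl) {
  uint16_t count = 0; /* running count of active set op1 bits */
  for (size_t i = 0; i < vl; ++i) {
    if (mask[i]) {
      dst[i] = count;        /* exclusive prefix count so far */
      if (op1[i]) ++count;   /* only active op1 bits contribute */
    } else {
      dst[i] = maskedoff[i]; /* inactive: destination undisturbed */
    }
  }
}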
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
index ea04158f63d9..ebbd552b8a7f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
@@ -1,10 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -Werror -Wall -o - %s >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
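
Editorial note (not part of the patch): the updated ASM run line adds -S, without which cc1 does not run the backend and the warning check inspects nothing, and adds -target-feature +m, presumably because lowering RVV code to real assembly needs the M extension for the multiplies used in vlenb-scaled stack offsets.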
@@ -1704,3 +1705,115 @@ vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const
vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) {
return vle64_v_f64m8_m(mask, maskedoff, base, vl);
}
+
+// CHECK-RV32-LABEL: @test_vle1_v_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1.i32(<vscale x 64 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle1_v_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vle1.nxv64i1.i64(<vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP1]]
+//
+vbool1_t test_vle1_v_b1(const uint8_t *base, size_t vl) {
+ return vle1_v_b1(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle1_v_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1.i32(<vscale x 32 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle1_v_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vle1.nxv32i1.i64(<vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP1]]
+//
+vbool2_t test_vle1_v_b2(const uint8_t *base, size_t vl) {
+ return vle1_v_b2(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle1_v_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1.i32(<vscale x 16 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle1_v_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vle1.nxv16i1.i64(<vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP1]]
+//
+vbool4_t test_vle1_v_b4(const uint8_t *base, size_t vl) {
+ return vle1_v_b4(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle1_v_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1.i32(<vscale x 8 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle1_v_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vle1.nxv8i1.i64(<vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP1]]
+//
+vbool8_t test_vle1_v_b8(const uint8_t *base, size_t vl) {
+ return vle1_v_b8(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle1_v_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1.i32(<vscale x 4 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle1_v_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vle1.nxv4i1.i64(<vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP1]]
+//
+vbool16_t test_vle1_v_b16(const uint8_t *base, size_t vl) {
+ return vle1_v_b16(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle1_v_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1.i32(<vscale x 2 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle1_v_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vle1.nxv2i1.i64(<vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+//
+vbool32_t test_vle1_v_b32(const uint8_t *base, size_t vl) {
+ return vle1_v_b32(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle1_v_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1.i32(<vscale x 1 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle1_v_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vle1.nxv1i1.i64(<vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP1]]
+//
+vbool64_t test_vle1_v_b64(const uint8_t *base, size_t vl) {
+ return vle1_v_b64(base, vl);
+}
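
Editorial note (not part of the patch): vle1 loads a mask register from a packed bitmap in memory; vl counts mask bits, so ceil(vl/8) bytes are read regardless of the mask type's LMUL. A hedged usage sketch built from the intrinsic names this patch tests (the helper name masked_add_u8 is illustrative only):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* dst[i] += src[i] wherever bit i of the packed bitmap is set.
 * Assumes vl is a multiple of 8 on every strip except the last,
 * so bitmap + i / 8 stays byte-aligned. */
void masked_add_u8(uint8_t *dst, const uint8_t *src,
                   const uint8_t *bitmap, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e8m1(n - i);             /* elements this strip */
    vbool8_t m = vle1_v_b8(bitmap + i / 8, vl); /* load vl mask bits */
    vuint8m1_t d = vle8_v_u8m1(dst + i, vl);
    vuint8m1_t s = vle8_v_u8m1(src + i, vl);
    d = vadd_vv_u8m1_m(m, d, d, s, vl);         /* add only where mask is set */
    vse8_v_u8m1(dst + i, d, vl);
    i += vl;
  }
}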
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
new file mode 100644
index 000000000000..58ab525b03fd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
@@ -0,0 +1,2348 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \
+// RUN: -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s \
+// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s \
+// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f \
+// RUN: -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -S -o - %s >/dev/null 2>%t
+// RUN: FileCheck --check-prefix=ASM --allow-empty %s <%t
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP2]]
+//
+vint8mf8_t test_vle8ff_v_i8mf8 (const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8mf8(base, new_vl, vl);
+}
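
Editorial note (not part of the patch): the *ff intrinsics are RVV "fault-only-first" loads. Element 0 faults normally, but a trap on any later element truncates the load instead of faulting, and the count of elements actually loaded is written through new_vl. The canonical use is scanning memory of unknown extent; a sketch, assuming the v0.10-era intrinsic names this patch uses:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* strlen-style scan: vle8ff stops at an unmapped page boundary
 * instead of faulting past the end of the string. */
size_t vec_strlen(const char *s) {
  size_t len = 0;
  for (;;) {
    size_t vl = vsetvlmax_e8m8();
    size_t new_vl;
    vint8m8_t v = vle8ff_v_i8m8((const int8_t *)s + len, &new_vl, vl);
    vbool1_t zero = vmseq_vx_i8m8_b1(v, 0, new_vl); /* compare loaded part */
    long idx = vfirst_m_b1(zero, new_vl);           /* first NUL, or -1 */
    if (idx >= 0)
      return len + (size_t)idx;
    len += new_vl;
  }
}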
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP2]]
+//
+vint8mf4_t test_vle8ff_v_i8mf4 (const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8mf4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP2]]
+//
+vint8mf2_t test_vle8ff_v_i8mf2 (const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8mf2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP2]]
+//
+vint8m1_t test_vle8ff_v_i8m1 (const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP2]]
+//
+vint8m2_t test_vle8ff_v_i8m2 (const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP2]]
+//
+vint8m4_t test_vle8ff_v_i8m4 (const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP2]]
+//
+vint8m8_t test_vle8ff_v_i8m8 (const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP2]]
+//
+vuint8mf8_t test_vle8ff_v_u8mf8 (const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8mf8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP2]]
+//
+vuint8mf4_t test_vle8ff_v_u8mf4 (const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8mf4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP2]]
+//
+vuint8mf2_t test_vle8ff_v_u8mf2 (const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8mf2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP2]]
+//
+vuint8m1_t test_vle8ff_v_u8m1 (const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP2]]
+//
+vuint8m2_t test_vle8ff_v_u8m2 (const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP2]]
+//
+vuint8m4_t test_vle8ff_v_u8m4 (const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP2]]
+//
+vuint8m8_t test_vle8ff_v_u8m8 (const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP2]]
+//
+vint8mf8_t test_vle8ff_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl);
+}
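
Editorial note (not part of the patch): the masked _m forms below follow the unmasked pattern; inactive elements never fault and take their values from maskedoff, while the fault-only-first truncation and the store through new_vl behave as above.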
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP2]]
+//
+vint8mf4_t test_vle8ff_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8mf4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP2]]
+//
+vint8mf2_t test_vle8ff_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8mf2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP2]]
+//
+vint8m1_t test_vle8ff_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP2]]
+//
+vint8m2_t test_vle8ff_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP2]]
+//
+vint8m4_t test_vle8ff_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP2]]
+//
+vint8m8_t test_vle8ff_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_i8m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i32 } @llvm.riscv.vleff.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP2]]
+//
+vuint8mf8_t test_vle8ff_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8mf8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i32 } @llvm.riscv.vleff.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP2]]
+//
+vuint8mf4_t test_vle8ff_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8mf4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i32 } @llvm.riscv.vleff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP2]]
+//
+vuint8mf2_t test_vle8ff_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8mf2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP2]]
+//
+vuint8m1_t test_vle8ff_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP2]]
+//
+vuint8m2_t test_vle8ff_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP2]]
+//
+vuint8m4_t test_vle8ff_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8ff_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP2]]
+//
+vuint8m8_t test_vle8ff_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl) {
+ return vle8ff_v_u8m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP2]]
+//
+vint16mf4_t test_vle16ff_v_i16mf4 (const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16mf4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP2]]
+//
+vint16mf2_t test_vle16ff_v_i16mf2 (const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16mf2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP2]]
+//
+vint16m1_t test_vle16ff_v_i16m1 (const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP2]]
+//
+vint16m2_t test_vle16ff_v_i16m2 (const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP2]]
+//
+vint16m4_t test_vle16ff_v_i16m4 (const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP2]]
+//
+vint16m8_t test_vle16ff_v_i16m8 (const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP2]]
+//
+vuint16mf4_t test_vle16ff_v_u16mf4 (const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16mf4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP2]]
+//
+vuint16mf2_t test_vle16ff_v_u16mf2 (const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16mf2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP2]]
+//
+vuint16m1_t test_vle16ff_v_u16m1 (const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP2]]
+//
+vuint16m2_t test_vle16ff_v_u16m2 (const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP2]]
+//
+vuint16m4_t test_vle16ff_v_u16m4 (const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP2]]
+//
+vuint16m8_t test_vle16ff_v_u16m8 (const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP2]]
+//
+vint16mf4_t test_vle16ff_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16mf4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP2]]
+//
+vint16mf2_t test_vle16ff_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16mf2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP2]]
+//
+vint16m1_t test_vle16ff_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP2]]
+//
+vint16m2_t test_vle16ff_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP2]]
+//
+vint16m4_t test_vle16ff_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP2]]
+//
+vint16m8_t test_vle16ff_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_i16m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i32 } @llvm.riscv.vleff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP2]]
+//
+vuint16mf4_t test_vle16ff_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16mf4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i32 } @llvm.riscv.vleff.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP2]]
+//
+vuint16mf2_t test_vle16ff_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16mf2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP2]]
+//
+vuint16m1_t test_vle16ff_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP2]]
+//
+vuint16m2_t test_vle16ff_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP2]]
+//
+vuint16m4_t test_vle16ff_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16ff_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP2]]
+//
+vuint16m8_t test_vle16ff_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl) {
+ return vle16ff_v_u16m8_m(mask, maskedoff, base, new_vl, vl);
+}
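(Editorial note, not part of the patch: the tests above exercise the fault-only-first loads, whose defining property is that a fault on any element after the first shrinks the reported vector length instead of trapping; the actual count comes back through new_vl. A minimal usage sketch under stated assumptions — the hypothetical helper copy_until_fault, plus vsetvl_e16m1 and vse16_v_i16m1 from the same pre-__riscv_ intrinsic spelling this patch uses — is:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: copy up to n int16 elements, stopping cleanly at
 * the first element that would fault. A fault on element 0 still traps,
 * so new_vl >= 1 whenever vl >= 1. Returns the number of elements copied. */
size_t copy_until_fault(int16_t *dst, const int16_t *src, size_t n) {
    size_t copied = 0;
    while (n > 0) {
        size_t vl = vsetvl_e16m1(n);            /* elements this iteration   */
        size_t new_vl;                          /* written by the ff load    */
        vint16m1_t v = vle16ff_v_i16m1(src, &new_vl, vl);
        vse16_v_i16m1(dst, v, new_vl);          /* store only the valid part */
        src += new_vl;
        dst += new_vl;
        copied += new_vl;
        if (new_vl < vl)                        /* a later element faulted   */
            break;
        n -= new_vl;
    }
    return copied;
}

The shrunken new_vl is exactly what the store of TMP3 into [[NEW_VL]] checks for in each test above.)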
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP2]]
+//
+vint32mf2_t test_vle32ff_v_i32mf2 (const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32mf2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP2]]
+//
+vint32m1_t test_vle32ff_v_i32m1 (const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP2]]
+//
+vint32m2_t test_vle32ff_v_i32m2 (const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP2]]
+//
+vint32m4_t test_vle32ff_v_i32m4 (const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP2]]
+//
+vint32m8_t test_vle32ff_v_i32m8 (const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP2]]
+//
+vuint32mf2_t test_vle32ff_v_u32mf2 (const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32mf2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP2]]
+//
+vuint32m1_t test_vle32ff_v_u32m1 (const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP2]]
+//
+vuint32m2_t test_vle32ff_v_u32m2 (const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP2]]
+//
+vuint32m4_t test_vle32ff_v_u32m4 (const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP2]]
+//
+vuint32m8_t test_vle32ff_v_u32m8 (const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.nxv1f32.i32(<vscale x 1 x float>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.nxv1f32.i64(<vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP2]]
+//
+vfloat32mf2_t test_vle32ff_v_f32mf2 (const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32mf2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.nxv2f32.i32(<vscale x 2 x float>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.nxv2f32.i64(<vscale x 2 x float>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP2]]
+//
+vfloat32m1_t test_vle32ff_v_f32m1 (const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.nxv4f32.i32(<vscale x 4 x float>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.nxv4f32.i64(<vscale x 4 x float>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP2]]
+//
+vfloat32m2_t test_vle32ff_v_f32m2 (const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.nxv8f32.i32(<vscale x 8 x float>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.nxv8f32.i64(<vscale x 8 x float>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP2]]
+//
+vfloat32m4_t test_vle32ff_v_f32m4 (const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.nxv16f32.i32(<vscale x 16 x float>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.nxv16f32.i64(<vscale x 16 x float>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP2]]
+//
+vfloat32m8_t test_vle32ff_v_f32m8 (const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP2]]
+//
+vint32mf2_t test_vle32ff_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32mf2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP2]]
+//
+vint32m1_t test_vle32ff_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP2]]
+//
+vint32m2_t test_vle32ff_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP2]]
+//
+vint32m4_t test_vle32ff_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP2]]
+//
+vint32m8_t test_vle32ff_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_i32m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i32 } @llvm.riscv.vleff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i32>, i64 } @llvm.riscv.vleff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP2]]
+//
+vuint32mf2_t test_vle32ff_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32mf2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i32>, i64 } @llvm.riscv.vleff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP2]]
+//
+vuint32m1_t test_vle32ff_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i32>, i64 } @llvm.riscv.vleff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP2]]
+//
+vuint32m2_t test_vle32ff_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i32>, i64 } @llvm.riscv.vleff.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP2]]
+//
+vuint32m4_t test_vle32ff_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x i32>, i64 } @llvm.riscv.vleff.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i32>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP2]]
+//
+vuint32m8_t test_vle32ff_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_u32m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i32 } @llvm.riscv.vleff.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x float>, i64 } @llvm.riscv.vleff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP2]]
+//
+vfloat32mf2_t test_vle32ff_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32mf2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x float>, i32 } @llvm.riscv.vleff.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x float>, i64 } @llvm.riscv.vleff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP2]]
+//
+vfloat32m1_t test_vle32ff_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x float>, i32 } @llvm.riscv.vleff.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x float>, i64 } @llvm.riscv.vleff.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP2]]
+//
+vfloat32m2_t test_vle32ff_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x float>, i32 } @llvm.riscv.vleff.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x float>, i64 } @llvm.riscv.vleff.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP2]]
+//
+vfloat32m4_t test_vle32ff_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle32ff_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x float>, i32 } @llvm.riscv.vleff.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x float>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x float>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 16 x float>, i64 } @llvm.riscv.vleff.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x float>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP2]]
+//
+vfloat32m8_t test_vle32ff_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) {
+ return vle32ff_v_f32m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
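+// Note: the vleNNff ("fault-only-first") loads in this file may stop early at
+// the first element that would fault; the count of elements actually
+// processed is written through new_vl. In the IR this is the second member
+// of the { vector, xlen } aggregate returned by @llvm.riscv.vleff, stored as
+// i32 on RV32 and i64 on RV64, as the checks above and below verify. A
+// minimal caller-side sketch (hypothetical code, not part of the checked
+// output):
+//
+//   size_t processed;
+//   vint64m1_t v = vle64ff_v_i64m1(src, &processed, vl);
+//   // processed <= vl; a fault on element 0 still traps, while faults on
+//   // later elements only trim the active length
+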
+// CHECK-RV32-LABEL: @test_vle64ff_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP2]]
+//
+vint64m1_t test_vle64ff_v_i64m1 (const int64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_i64m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+//
+vint64m2_t test_vle64ff_v_i64m2 (const int64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_i64m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP2]]
+//
+vint64m4_t test_vle64ff_v_i64m4 (const int64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_i64m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP2]]
+//
+vint64m8_t test_vle64ff_v_i64m8 (const int64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_i64m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP2]]
+//
+vuint64m1_t test_vle64ff_v_u64m1 (const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_u64m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+//
+vuint64m2_t test_vle64ff_v_u64m2 (const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_u64m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP2]]
+//
+vuint64m4_t test_vle64ff_v_u64m4 (const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_u64m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP2]]
+//
+vuint64m8_t test_vle64ff_v_u64m8 (const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_u64m8(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.nxv1f64.i32(<vscale x 1 x double>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.nxv1f64.i64(<vscale x 1 x double>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP2]]
+//
+vfloat64m1_t test_vle64ff_v_f64m1 (const double *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_f64m1(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.nxv2f64.i32(<vscale x 2 x double>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.nxv2f64.i64(<vscale x 2 x double>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP2]]
+//
+vfloat64m2_t test_vle64ff_v_f64m2 (const double *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_f64m2(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.nxv4f64.i32(<vscale x 4 x double>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.nxv4f64.i64(<vscale x 4 x double>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP2]]
+//
+vfloat64m4_t test_vle64ff_v_f64m4 (const double *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_f64m4(base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.nxv8f64.i32(<vscale x 8 x double>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.nxv8f64.i64(<vscale x 8 x double>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP2]]
+//
+vfloat64m8_t test_vle64ff_v_f64m8 (const double *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_f64m8(base, new_vl, vl);
+}
+
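+// The masked (_m) forms below additionally take a mask and a maskedoff
+// passthru: inactive elements come from maskedoff, which is why
+// @llvm.riscv.vleff.mask carries the passthru vector as its first operand in
+// the checked IR. Hypothetical caller-side sketch:
+//
+//   size_t processed;
+//   vint64m1_t v = vle64ff_v_i64m1_m(mask, passthru, src, &processed, vl);
+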
+// CHECK-RV32-LABEL: @test_vle64ff_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP2]]
+//
+vint64m1_t test_vle64ff_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_i64m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+//
+vint64m2_t test_vle64ff_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_i64m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP2]]
+//
+vint64m4_t test_vle64ff_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_i64m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP2]]
+//
+vint64m8_t test_vle64ff_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_i64m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP2]]
+//
+vuint64m1_t test_vle64ff_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_u64m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP2]]
+//
+vuint64m2_t test_vle64ff_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_u64m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP2]]
+//
+vuint64m4_t test_vle64ff_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_u64m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP2]]
+//
+vuint64m8_t test_vle64ff_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_u64m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x double>, i32 } @llvm.riscv.vleff.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP2]]
+//
+vfloat64m1_t test_vle64ff_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_f64m1_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x double>, i32 } @llvm.riscv.vleff.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP2]]
+//
+vfloat64m2_t test_vle64ff_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_f64m2_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x double>, i32 } @llvm.riscv.vleff.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP2]]
+//
+vfloat64m4_t test_vle64ff_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_f64m4_m(mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle64ff_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x double>, i32 } @llvm.riscv.vleff.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i32 } [[TMP1]], 0
+// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i32 } [[TMP1]], 1
+// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP2]]
+//
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP2]]
+//
+vfloat64m8_t test_vle64ff_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl) {
+ return vle64ff_v_f64m8_m(mask, maskedoff, base, new_vl, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
new file mode 100644
index 000000000000..21ca49ded56b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
@@ -0,0 +1,6123 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S > /dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
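+// These tests exercise vloxeiN (indexed-ordered loads): element i is loaded
+// from base plus the unsigned byte offset bindex[i], lowering to
+// @llvm.riscv.vloxei with the index vector as an extra operand. A
+// hypothetical gather sketch (identifiers assumed for illustration):
+//
+//   vuint8m1_t offsets = vle8_v_u8m1(idx, vl);      // byte offsets
+//   vint8m1_t g = vloxei8_v_i8m1(src, offsets, vl); // ordered gather
+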
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_i8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_i8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_i8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_i8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_i8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8_v_i8m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vloxei8_v_i8m8(base, bindex, vl);
+}
+
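+// From here on, the offset element width can differ from the data element
+// width, so the index operand's register group scales with the ratio: i8
+// data at LMUL=1/8 with 16-bit offsets takes a vuint16mf4_t index, and each
+// group stops where the index vector would need more than LMUL=8 (e.g.
+// 64-bit offsets with i8 data end at vloxei64_v_i8m1).
+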
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_i8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_i8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_i8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_i8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_i8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16_v_i8m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_i8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_i8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_i8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_i8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_i8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_i8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_i8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_i8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_i8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_i16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_i16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_i16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_i16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_i16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8_v_i16m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_i16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_i16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_i16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_i16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_i16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16_v_i16m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_i16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_i16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_i16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_i16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_i16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_i16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_i16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_i16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_i16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_i32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_i32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_i32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_i32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_i32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_i32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_i32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_i32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_i32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_i32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_i32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_i32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_i32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_i32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_i32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_i32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_i32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_i32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_i32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_i64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_i64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vloxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_i64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_i64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_i64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_i64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_i64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_i64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_i64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_i64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_i64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_i64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_i64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_i64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_i64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_i64m8(base, bindex, vl);
+}
+
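+// EDITORIAL NOTE (annotation, not part of the committed patch): each test
+// above exercises one vloxei* ordered indexed load; element i is fetched
+// from the byte address base + bindex[i] for i in [0, vl). A minimal usage
+// sketch, assuming the v0.10 intrinsics this file tests plus the companion
+// vsetvl_e32m1 / vle32_v_u32m1 / vse32_v_i32m1 forms:
+//
+//   void gather_i32(const int32_t *src, const uint32_t *byte_off,
+//                   int32_t *dst, size_t n) {
+//     for (size_t i = 0; i < n;) {
+//       size_t vl = vsetvl_e32m1(n - i);               // elements this pass
+//       vuint32m1_t idx = vle32_v_u32m1(byte_off + i, vl);
+//       vint32m1_t v = vloxei32_v_i32m1(src, idx, vl); // src + idx[j] bytes
+//       vse32_v_i32m1(dst + i, v, vl);
+//       i += vl;
+//     }
+//   }
+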
+// CHECK-RV32-LABEL: @testloxei8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_u8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_u8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_u8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_u8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_u8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t testloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8_v_u8m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t testloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vloxei8_v_u8m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_u8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_u8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_u8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_u8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_u8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t testloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16_v_u8m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_u8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_u8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_u8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_u8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_u8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_u8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_u8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_u8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_u8m1(base, bindex, vl);
+}
+
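+// EDITORIAL NOTE (annotation, not part of the committed patch): the index
+// operand's LMUL is data LMUL * (index EEW / data EEW), which is why the
+// 64-bit-index u8 tests above stop at u8m1 (whose index is already a
+// vuint64m8_t); a u8m2 variant would need a nonexistent LMUL=16 index vector.
+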
+// CHECK-RV32-LABEL: @testloxei8_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_u16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_u16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_u16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_u16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_u16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t testloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8_v_u16m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_u16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_u16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_u16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_u16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_u16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t testloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16_v_u16m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_u16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_u16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_u16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_u16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei32_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei32_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_u16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_u16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_u16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_u16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei64_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei64_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_u16m2(base, bindex, vl);
+}
+
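+// EDITORIAL NOTE (annotation, not part of the committed patch): because the
+// indices are byte offsets, vloxei can gather one member out of an array of
+// structures. A hypothetical sketch (rec_t, recs, vl, and the vid_v_u16m1 /
+// vmul_vx_u16m1 helpers are assumed, not taken from this test):
+//
+//   typedef struct { uint16_t key; uint16_t pad[3]; } rec_t; // 8-byte record
+//   vuint16m1_t off = vmul_vx_u16m1(vid_v_u16m1(vl), sizeof(rec_t), vl);
+//   vuint16m1_t keys = vloxei16_v_u16m1(&recs[0].key, off, vl);
+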
+// CHECK-RV32-LABEL: @testloxei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_u32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_u32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_u32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_u32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testloxei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_u32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testloxei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_u32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_u32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_u32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_u32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_u32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_u32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_u32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_u32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_u32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_u32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_u32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_u32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_u32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_u32m4(base, bindex, vl);
+}
+
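+// 64-bit element loads: the index vector's LMUL scales with the ratio of
+// index EEW to data EEW, so vloxei8_v_u64m1 takes a vuint8mf8_t index
+// (8/64 of m1) while vloxei64_v_u64m8 takes a full vuint64m8_t. This is
+// also why the 64-bit-indexed u32 tests above stop at u32m4: the index
+// type for u32m8 would need LMUL 16, which does not exist.
+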
+// CHECK-RV32-LABEL: @testuxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_u64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_u64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_u64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_u64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_u64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_u64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_u64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_u64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_u64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_u64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_u64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_u64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_u64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_u64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_u64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_u64m8(base, bindex, vl);
+}
+
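+// Floating-point gathers reuse the same unsigned index vectors; only the
+// data type changes (float* base, nxv<N>f32 results), so the checked
+// intrinsics become @llvm.riscv.vloxei.nxvNf32.nxvMi<eew>.
+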
+// CHECK-RV32-LABEL: @testuxei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_f32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_f32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_f32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_f32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_f32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_f32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_f32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_f32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_f32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_f32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_f32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_f32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_f32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_f32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_f32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_f32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_f32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_f32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_f32m4(base, bindex, vl);
+}
+
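+// f64 gathers mirror the f32 tests with double* bases and nxv<N>f64
+// results. Since the index EEW never exceeds the 64-bit data EEW here,
+// every data LMUL from m1 to m8 has a legal index type.
+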
+// CHECK-RV32-LABEL: @testuxei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_f64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_f64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_f64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_f64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_f64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_f64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_f64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_f64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_f64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_f64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_f64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_f64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_f64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_f64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_f64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_f64m8(base, bindex, vl);
+}
+
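For context: the test functions are named testuxei* but call the ordered-indexed vloxei* intrinsics, and everything from here on covers the masked (_m) overloads, which prepend a mask and a maskedoff operand; lanes where the mask is clear take their result from maskedoff. A minimal usage sketch under the same v0.10 intrinsics API these tests target (gather_f64_masked is an illustrative name, not from the patch):

#include <riscv_vector.h>

// Gather vl doubles from byte offsets bindex[i] off base, but only in
// lanes where mask is set; inactive lanes keep the maskedoff values.
vfloat64m1_t gather_f64_masked(vbool64_t mask, vfloat64m1_t maskedoff,
                               const double *base, vuint32mf2_t bindex,
                               size_t vl) {
  return vloxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
}
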
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vloxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
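In every pairing here the index vector carries the same element count as the data vector, so its register group scales with the ratio of index width to data width; that is why the 64-bit-index blocks further down stop at smaller data LMULs (an m8 index group is the widest the register file allows). A sketch of the widest such i16 case, using the unmasked form of the intrinsic whose masked variant is tested below (gather_i16_wide is an illustrative name):

#include <riscv_vector.h>

// nxv8i16 data gathered through nxv8i64 byte offsets: the same element
// count on both sides, but the 64-bit index vector needs four times
// the registers of the 16-bit data vector.
vint16m2_t gather_i16_wide(const int16_t *base, vuint64m8_t byte_off,
                           size_t vl) {
  return vloxei64_v_i16m2(base, byte_off, vl);
}
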
+// CHECK-RV32-LABEL: @testuxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
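One detail these signatures rely on: the vloxei index operand holds byte offsets, not element numbers, so an element index has to be scaled by the element size before the load. A short sketch of the ei8/i32 pairing tested below, assuming the companion vid and vmul intrinsics from the same API version (load_every_other_i32 is an illustrative name):

#include <riscv_vector.h>

// Load int32 elements 0, 2, 4, ... of base. vloxei treats the index
// vector as byte offsets, so the element stride is scaled by 4 here;
// vl must stay small enough (<= 32) for the offsets to fit in 8 bits.
vint32m4_t load_every_other_i32(const int32_t *base, size_t vl) {
  vuint8m1_t off = vid_v_u8m1(vl);                   // 0, 1, 2, ...
  off = vmul_vx_u8m1(off, 2 * sizeof(int32_t), vl);  // 0, 8, 16, ...
  return vloxei8_v_i32m4(base, off, vl);
}
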
+// CHECK-RV32-LABEL: @testuxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vloxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vloxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
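+// NOTE (illustrative, not part of the generated tests): the index EEW is
+// independent of the data SEW, so with 16-bit data at LMUL=2 an 8-bit
+// offset vector only needs EMUL=1, which is why vuint16m2_t pairs with
+// vuint8m1_t above. A sketch under the same assumptions as before; the
+// wrapper name gather_halves_m is hypothetical.
+//
+// #include <riscv_vector.h>
+//
+// // 16-bit gather with narrow 8-bit offsets. Index EMUL scales as
+// // LMUL * (index EEW / SEW) = 2 * (8 / 16) = 1. Offsets are raw byte
+// // offsets, so element i of a uint16_t table sits at offset 2 * i.
+// static vuint16m2_t gather_halves_m(vbool8_t mask, vuint16m2_t fallback,
+//                                    const uint16_t *table,
+//                                    vuint8m1_t byte_off, size_t vl) {
+//   return vloxei8_v_u16m2_m(mask, fallback, table, byte_off, vl);
+// }
+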
+// CHECK-RV32-LABEL: @testuxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vloxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
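+// NOTE (illustrative, not part of the generated tests): the mask type
+// depends only on the SEW/LMUL ratio, so the vuint8m1_t (8/1),
+// vuint16m2_t (16/2), and vuint64m8_t (64/8) gathers in this file all
+// take vbool8_t. A sketch with 64-bit data, same assumptions as above;
+// the wrapper name gather_words_m is hypothetical.
+//
+// #include <riscv_vector.h>
+//
+// // 64-bit gather with 8-bit byte offsets: data at LMUL=8, index at
+// // EMUL = 8 * (8 / 64) = 1, mask ratio 64/8 = 8 -> vbool8_t.
+// static vuint64m8_t gather_words_m(vbool8_t mask, vuint64m8_t fallback,
+//                                   const uint64_t *table,
+//                                   vuint8m1_t byte_off, size_t vl) {
+//   return vloxei8_v_u64m8_m(mask, fallback, table, byte_off, vl);
+// }
+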
+// CHECK-RV32-LABEL: @testuxei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
+ return vloxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
+ return vloxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
+ return vloxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
+ return vloxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vloxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
+ return vloxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
+ return vloxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vloxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
+ return vloxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
+ return vloxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
+ return vloxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+ return vloxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+ return vloxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+ return vloxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+ return vloxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl);
+}
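The masked vloxei tests above all follow one calling pattern: mask selects the active lanes, maskedoff supplies the result for inactive lanes, base is the scalar pointer, and bindex carries per-element byte offsets. A minimal hand-written caller in the same pre-__riscv_-renaming spelling these tests use might look like the sketch below; gather_example and its parameters are illustrative and not part of the patch, and it assumes the same-era intrinsics vsetvl_e64m1, vle64_v_u64m1, vmset_m_b64, vfmv_v_f_f64m1, and vse64_v_f64m1 alongside the vloxei64_v_f64m1_m exercised here.

#include <riscv_vector.h>

// Sketch only: gather doubles from table at the byte offsets in
// byte_offsets, with an all-true mask and 0.0 as the inactive-lane value.
void gather_example(const double *table, const uint64_t *byte_offsets,
                    double *out, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e64m1(n - i);              // lanes handled this pass
    vuint64m1_t bindex = vle64_v_u64m1(byte_offsets + i, vl);
    vbool64_t mask = vmset_m_b64(vl);             // every lane active
    vfloat64m1_t off = vfmv_v_f_f64m1(0.0, vl);   // maskedoff operand
    vfloat64m1_t v = vloxei64_v_f64m1_m(mask, off, table, bindex, vl);
    vse64_v_f64m1(out + i, v, vl);
    i += vl;
  }
}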
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
new file mode 100644
index 000000000000..f4d108df25f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
@@ -0,0 +1,1862 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
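+
+// vlse performs a strided load: one element is read every bstride bytes
+// starting at base, so bstride == sizeof(element) degenerates to a
+// unit-stride load and bstride == 0 effectively splats *base across the
+// active lanes. Hand-written sketch, not autogenerated; a, cols, and c
+// are hypothetical (column c of a row-major double matrix with cols
+// columns):
+//   size_t vl = vsetvl_e64m1(rows);
+//   vfloat64m1_t col =
+//       vlse64_v_f64m1(a + c, (ptrdiff_t)(cols * sizeof(double)), vl);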
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vlse8_v_i8mf8(const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8mf8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vlse8_v_i8mf4(const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8mf4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vlse8_v_i8mf2(const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8mf2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vlse8_v_i8m1(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return vlse8_v_i8m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vlse8_v_i8m2(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return vlse8_v_i8m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vlse8_v_i8m4(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return vlse8_v_i8m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vlse8_v_i8m8(const int8_t *base, ptrdiff_t bstride, size_t vl) {
+ return vlse8_v_i8m8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vlse16_v_i16mf4(const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16mf4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vlse16_v_i16mf2(const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16mf2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vlse16_v_i16m1(const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vlse16_v_i16m2(const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vlse16_v_i16m4(const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vlse16_v_i16m8(const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16m8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vlse32_v_i32mf2(const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32mf2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vlse32_v_i32m1(const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vlse32_v_i32m2(const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vlse32_v_i32m4(const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vlse32_v_i32m8(const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32m8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vlse64_v_i64m1(const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_i64m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vlse64_v_i64m2(const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_i64m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vlse64_v_i64m4(const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_i64m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vlse64_v_i64m8(const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_i64m8(base, bstride, vl);
+}
+
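+// The unsigned variants below lower to the same llvm.riscv.vlse.nxv*i* calls
+// as the signed tests above, since LLVM IR integer vector types are signless;
+// only the C-level element and return types differ.
+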
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vlse8_v_u8mf8(const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8mf8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vlse8_v_u8mf4(const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8mf4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vlse8_v_u8mf2(const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8mf2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vlse8_v_u8m1(const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vlse8_v_u8m2(const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vlse8_v_u8m4(const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vlse8_v_u8m8(const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8m8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vlse16_v_u16mf4(const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16mf4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vlse16_v_u16mf2(const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16mf2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vlse16_v_u16m1(const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vlse16_v_u16m2(const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vlse16_v_u16m4(const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vlse16_v_u16m8(const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16m8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vlse32_v_u32mf2(const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32mf2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vlse32_v_u32m1(const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vlse32_v_u32m2(const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vlse32_v_u32m4(const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vlse32_v_u32m8(const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32m8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vlse64_v_u64m1(const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_u64m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vlse64_v_u64m2(const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_u64m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vlse64_v_u64m4(const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_u64m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_u64m8(base, bstride, vl);
+}
+
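+// The floating-point variants bitcast float*/double* to the matching scalable
+// FP vector pointer and call llvm.riscv.vlse.nxv*f32/nxv*f64; the stride and
+// vl operands are unchanged.
+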
+// CHECK-RV32-LABEL: @test_vlse32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32.i32(<vscale x 1 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32.i64(<vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vlse32_v_f32mf2(const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32mf2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32.i32(<vscale x 2 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32.i64(<vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vlse32_v_f32m1(const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32.i32(<vscale x 4 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32.i64(<vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vlse32_v_f32m2(const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32.i32(<vscale x 8 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32.i64(<vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vlse32_v_f32m4(const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32.i32(<vscale x 16 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32.i64(<vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vlse32_v_f32m8(const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32m8(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64.i32(<vscale x 1 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64.i64(<vscale x 1 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vlse64_v_f64m1(const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_f64m1(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64.i32(<vscale x 2 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64.i64(<vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vlse64_v_f64m2(const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_f64m2(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64.i32(<vscale x 4 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64.i64(<vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vlse64_v_f64m4(const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_f64m4(base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64.i32(<vscale x 8 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64.i64(<vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vlse64_v_f64m8(const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_f64m8(base, bstride, vl);
+}
+
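+// The masked (_m) variants below pass two extra operands to the
+// llvm.riscv.vlse.mask.* intrinsics: maskedoff, which supplies the result for
+// inactive elements, and the vbool*_t mask itself.
+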
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8mf8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8mf4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8mf2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ const int8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_i8m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16mf4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16mf2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ const int16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_i16m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32mf2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ const int32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_i32m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_i64m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_i64m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_i64m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ const int64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_i64m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8mf8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8mf4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8mf2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ const uint8_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse8_v_u8m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16mf4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16mf2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ const uint16_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse16_v_u16m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32mf2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ const uint32_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_u32m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_u64m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_u64m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_u64m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ const uint64_t *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_u64m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32mf2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ const float *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse32_v_f32m8_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_f64m1_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_f64m2_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_f64m4_m(mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlse64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ const double *base, ptrdiff_t bstride,
+ size_t vl) {
+ return vlse64_v_f64m8_m(mask, maskedoff, base, bstride, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
new file mode 100644
index 000000000000..ae34fbe1909a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
@@ -0,0 +1,6123 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S > /dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_i8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_i8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_i8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_i8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_i8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8_v_i8m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vluxei8_v_i8m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_i8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_i8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_i8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_i8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_i8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_i8m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_i8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_i8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_i8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_i8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_i8m2(base, bindex, vl);
+}
+
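+// With EEW-64 indices over 8-bit data, the index register group is eight
+// times wider than the data group, so these tests stop at vint8m1_t: its
+// vuint64m8_t index already occupies the maximum LMUL of 8.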
+// CHECK-RV32-LABEL: @testuxei64_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_i8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_i8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_i8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_i8m1(base, bindex, vl);
+}
+
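+// 16-bit data from here on.  The index LMUL scales with the EEW ratio:
+// e.g. testuxei8_v_i16m2 pairs vint16m2_t data with a vuint8m1_t index,
+// half the LMUL because the index elements are half the data width.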
+// CHECK-RV32-LABEL: @testuxei8_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_i16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_i16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_i16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_i16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_i16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8_v_i16m8(base, bindex, vl);
+}
+
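+// When index and data EEW match, the two register groups share an LMUL,
+// which shows up in the IR as identical scalable types in the mangled
+// intrinsic name (e.g. nxv1i16.nxv1i16 below).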
+// CHECK-RV32-LABEL: @testuxei16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_i16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_i16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_i16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_i16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_i16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_i16m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_i16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_i16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_i16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_i16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_i16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_i16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_i16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_i16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_i16m2(base, bindex, vl);
+}
+
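+// 32-bit data group.  As an illustrative caller (not part of the test, and
+// with hypothetical variable names): the index elements are treated as byte
+// offsets from `base`, so a gather of `vl` int32 values through an EEW-8
+// offset vector could look like
+//
+//   // offs: a vuint8mf4_t of byte offsets, produced elsewhere
+//   vint32m1_t g = vluxei8_v_i32m1(base, offs, vl);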
+// CHECK-RV32-LABEL: @testuxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_i32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_i32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_i32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_i32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_i32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_i32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_i32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_i32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_i32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_i32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_i32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_i32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_i32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_i32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_i32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_i32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_i32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_i32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_i32m4(base, bindex, vl);
+}
+
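+// 64-bit data group.  Note that the VL operand stays XLEN-sized: the
+// CHECK-RV32 lines still pass an i32 VL even though the loaded elements
+// (and, in the testuxei64 cases, the indices) are i64.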
+// CHECK-RV32-LABEL: @testuxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_i64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_i64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_i64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_i64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_i64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_i64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_i64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_i64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_i64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_i64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_i64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_i64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_i64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_i64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_i64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_i64m8(base, bindex, vl);
+}
+
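+// Note on the pattern repeated below: vluxei{8,16,32,64} is the RVV
+// unordered indexed (gather) load. The bindex vector holds unsigned byte
+// offsets from base, and the digits after "ei" give the index element
+// width (EEW). Per the RVV spec, the index vector's register group
+// (EMUL) is (index EEW / data SEW) * LMUL, capped at m8, which is why
+// the groups with wider indices stop at smaller data LMULs. The RV32
+// and RV64 check lines differ only in the XLEN type of vl (i32 vs. i64).
+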
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_u8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_u8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_u8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_u8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_u8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8_v_u8m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vluxei8_v_u8m8(base, bindex, vl);
+}
+
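+// With 16-bit indices on 8-bit data, the index EMUL is twice the data
+// LMUL, so u8m4 (index group m8) is the largest data group exercised.
+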
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_u8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_u8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_u8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_u8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_u8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_u8m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_u8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u8m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_u8m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_u8mf8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_u8mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_u8mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_u8m1(base, bindex, vl);
+}
+
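+// 16-bit data: with 8-bit indices the index vector uses half the data
+// LMUL (e.g. a vuint8mf8_t index for the vuint16mf4_t result below).
+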
+// CHECK-RV32-LABEL: @testuxei8_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_u16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_u16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_u16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_u16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_u16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8_v_u16m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_u16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_u16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_u16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_u16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_u16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_u16m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_u16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u16m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_u16m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_u16mf4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_u16mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_u16m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_u16m2(base, bindex, vl);
+}
+
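+// 32-bit data: with 8-bit indices the index vector shrinks to a quarter
+// of the data LMUL, down to vuint8mf8_t for the u32mf2 case below.
+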
+// CHECK-RV32-LABEL: @testuxei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_u32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_u32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_u32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_u32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_u32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_u32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_u32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_u32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_u32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_u32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_u32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_u32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_u32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_u32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_u32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_u32m4(base, bindex, vl);
+}
+
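+// 64-bit element gathers: with SEW=64 every index EEW from 8 through 64 bits
+// is legal, since the index LMUL never exceeds the data LMUL.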
+// CHECK-RV32-LABEL: @testuxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_u64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_u64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_u64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_u64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_u64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_u64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_u64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_u64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_u64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_u64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_u64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_u64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_u64m8(base, bindex, vl);
+}
+
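+// Floating-point gathers follow the same pattern; only the result type
+// changes to <vscale x N x float> or <vscale x N x double>, while the index
+// operand remains an unsigned integer vector.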
+// CHECK-RV32-LABEL: @testuxei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_f32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_f32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_f32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_f32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_f32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_f32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_f32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_f32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_f32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_f32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_f32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_f32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_f32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_f32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_f32m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_f32mf2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_f32m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_f32m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_f32m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_f64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_f64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_f64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_f64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_f64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_f64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_f64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_f64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_f64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_f64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_f64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_f64m8(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_f64m1(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_f64m2(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_f64m4(base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_f64m8(base, bindex, vl);
+}
+
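+// Masked variants (_m suffix) prepend a mask and a maskedoff operand and
+// lower to the llvm.riscv.vluxei.mask intrinsics; per the merge semantics of
+// these intrinsics, inactive elements take their value from maskedoff.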
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vluxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
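+// Note on coverage: for i8 data the 64-bit-index group above stops at m1.
+// The index operand's effective LMUL is (index EEW / data SEW) * data LMUL
+// and must not exceed 8, so ei64 over i8m2 data would require EMUL = 16;
+// the same constraint caps the ei32 group at m2 and the ei16 group at m4.
+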
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
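+// Reference model for these indexed loads (a sketch of the unmasked
+// semantics, assuming no trapping accesses; out is a hypothetical plain
+// array), using the int16_t element type of the surrounding groups:
+//
+//   for (size_t i = 0; i < vl; ++i)
+//     out[i] = *(const int16_t *)((const char *)base + bindex[i]);
+//
+// The index values are byte offsets, not element numbers, so reaching the
+// k-th int16_t element takes bindex[i] = 2 * k.
+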
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei64_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
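+// With 8-bit offsets over i32 data (the group above), the reachable window
+// is base + 0..255 bytes, i.e. only the first 64 naturally aligned int32_t
+// elements; the wider index types tested below trade register pressure for
+// reach.
+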
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vluxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
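+// Note on coverage: with 64-bit indices and 32-bit data the index vector
+// occupies twice the register group of the result, so the vluxei64 tests
+// for i32 stop at m4 (an m8 result would need a nonexistent m16 index
+// group). The mask type always follows the result's SEW/LMUL ratio, e.g.
+// vbool8_t for i32m4 (32/4 = 8).
+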
+// CHECK-RV32-LABEL: @testuxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
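+// For 64-bit data every index width from 8 to 64 bits fits: the index
+// group is never larger than the data group, so the i64 tests above run
+// the full m1..m8 range for each of vluxei8/16/32/64.
+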
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+ return vluxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
+}
+
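+// With matching 8-bit data and 8-bit indices the whole mf8..m8 range is
+// legal, so this is the only unsigned 8-bit series that reaches u8m8; the
+// wider-index series below are capped progressively lower.
+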
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
+}
+
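+// The unsigned tests mirror the signed ones; only the C element type of
+// maskedoff/base changes, and the generated IR is identical. For 8-bit
+// data the widest result shrinks as the index widens: ei16 stops at u8m4,
+// ei32 at u8m2, and ei64 at u8m1, again because the index group may not
+// exceed m8.
+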
+// CHECK-RV32-LABEL: @testuxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+ return vluxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
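+// 16-bit data with narrower 8-bit indices again allows the full mf4..m8
+// range; the index group is half the data group throughout.
+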
+// CHECK-RV32-LABEL: @testuxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
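+// Illustrative sketch only (not covered by CHECK lines; the helper name is
+// hypothetical): vluxei8 treats each element of bindex as an unsigned byte
+// offset from base, so gathering uint32_t values through 8-bit indices needs
+// offsets that are multiples of four.
+static inline vuint32m1_t gather_u32_masked(vbool32_t mask, vuint32m1_t maskedoff,
+                                            const uint32_t *base,
+                                            vuint8mf4_t byte_offsets, size_t vl) {
+  return vluxei8_v_u32m1_m(mask, maskedoff, base, byte_offsets, vl);
+}
+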
+// CHECK-RV32-LABEL: @testuxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
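+// Likewise for floating point (sketch only, helper name hypothetical): the
+// index EEW is chosen independently of the element type, so 16-bit byte
+// offsets can address f32 data as long as each offset is a multiple of four.
+static inline vfloat32m1_t gather_f32_masked(vbool32_t mask, vfloat32m1_t maskedoff,
+                                             const float *base,
+                                             vuint16mf2_t byte_offsets, size_t vl) {
+  return vluxei16_v_f32m1_m(mask, maskedoff, base, byte_offsets, vl);
+}
+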
+// CHECK-RV32-LABEL: @testuxei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV32-LABEL: @testuxei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @testuxei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t testuxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
new file mode 100644
index 000000000000..44915ecd355f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
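+
+// Note (descriptive only, not a FileCheck directive): vmacc is the integer
+// multiply-add that overwrites the addend, i.e.
+//   vmacc_vv: acc[i] = (op1[i] * op2[i]) + acc[i]
+//   vmacc_vx: acc[i] = (op1    * op2[i]) + acc[i]
+// so acc is both the accumulator input and the destination operand.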
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc_vv_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc_vx_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc_vv_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc_vx_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc_vv_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc_vx_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc_vv_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc_vx_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc_vv_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc_vx_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc_vv_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc_vx_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc_vv_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc_vx_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc_vx_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc_vv_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc_vx_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc_vv_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc_vx_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc_vv_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc_vx_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc_vv_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc_vx_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc_vv_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc_vx_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc_vv_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc_vx_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc_vv_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc_vx_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc_vx_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc_vv_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmacc_vx_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc_vv_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmacc_vx_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc_vv_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmacc_vx_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc_vv_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmacc_vx_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc_vv_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmacc_vx_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc_vv_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmacc_vx_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc_vv_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmacc_vx_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmacc_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmacc_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmacc_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmacc_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmacc_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmacc_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmacc_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmacc_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmacc_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmacc_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmacc_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmacc_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmacc_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmacc_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmacc_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
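+
+// The unsigned tests below exercise the same llvm.riscv.vmacc.mask.* IR
+// intrinsics as the signed tests above: vmacc is a full-width multiply-add
+// that overwrites the accumulator, acc[i] = op1[i] * op2[i] + acc[i] for
+// active elements, so signedness does not change the generated calls.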
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc_vv_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmacc_vx_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc_vv_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmacc_vx_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc_vv_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmacc_vx_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc_vv_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmacc_vx_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc_vv_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmacc_vx_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc_vv_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmacc_vx_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc_vv_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmacc_vx_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmacc_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmacc_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmacc_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmacc_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmacc_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmacc_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmacc_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmacc_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmacc_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmacc_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmacc_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmacc_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmacc_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmacc_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmacc_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmacc_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c
new file mode 100644
index 000000000000..620421658141
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadc.c
@@ -0,0 +1,2566 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
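+// (Descriptive note, inferred from the RUN lines above: the third RUN line
+// compiles all the way to assembly with -Wall -Werror and discards the
+// output; combined with --allow-empty, the ASM-NOT check asserts that
+// expanding these intrinsics emits no compiler warnings.)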
+#include <riscv_vector.h>
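+
+// (Descriptive note, inferred from the tests below: the vvm/vxm forms take a
+// mask-register carry-in and lower to llvm.riscv.vmadc.carry.in.*, while the
+// plain vv/vx forms take no carry-in and lower to llvm.riscv.vmadc.*. Either
+// way the result is a mask register, so each test returns the vboolN_t whose
+// ratio matches the element type and LMUL of the operands.)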
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vvm_i8mf8_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vxm_i8mf8_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadc_vv_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmadc_vx_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vvm_i8mf4_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vxm_i8mf4_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadc_vv_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmadc_vx_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vvm_i8mf2_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vxm_i8mf2_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadc_vv_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmadc_vx_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc_vvm_i8m1_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i8m1_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadc_vv_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmadc_vx_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vmadc_vvm_i8m2_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i8m2_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadc_vv_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmadc_vx_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vmadc_vvm_i8m4_b2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i8m4_b2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadc_vv_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmadc_vx_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vmadc_vvm_i8m8_b1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i8m8_b1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadc_vv_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmadc_vx_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vvm_i16mf4_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vxm_i16mf4_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmadc_vv_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmadc_vx_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vvm_i16mf2_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vxm_i16mf2_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmadc_vv_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmadc_vx_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vvm_i16m1_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vxm_i16m1_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadc_vv_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmadc_vx_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vvm_i16m2_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i16m2_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadc_vv_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmadc_vx_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc_vvm_i16m4_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i16m4_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadc_vv_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmadc_vx_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vmadc_vvm_i16m8_b2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i16m8_b2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadc_vv_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmadc_vx_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vvm_i32mf2_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vxm_i32mf2_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmadc_vv_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmadc_vx_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vvm_i32m1_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vxm_i32m1_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadc_vv_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmadc_vx_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vvm_i32m2_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vxm_i32m2_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadc_vv_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmadc_vx_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vvm_i32m4_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i32m4_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadc_vv_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmadc_vx_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc_vvm_i32m8_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i32m8_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadc_vv_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmadc_vx_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vvm_i64m1_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vxm_i64m1_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadc_vv_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmadc_vx_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vvm_i64m2_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vxm_i64m2_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadc_vv_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmadc_vx_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vvm_i64m4_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vxm_i64m4_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadc_vv_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmadc_vx_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vvm_i64m8_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc_vxm_i64m8_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadc_vv_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmadc_vx_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vvm_u8mf8_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vxm_u8mf8_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadc_vv_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmadc_vx_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vvm_u8mf4_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vxm_u8mf4_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadc_vv_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmadc_vx_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vvm_u8mf2_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vxm_u8mf2_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadc_vv_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmadc_vx_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vvm_u8m1_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t carryin,
+ size_t vl) {
+ return vmadc_vxm_u8m1_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadc_vv_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmadc_vx_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc_vvm_u8m2_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t carryin,
+ size_t vl) {
+ return vmadc_vxm_u8m2_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadc_vv_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmadc_vx_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vmadc_vvm_u8m4_b2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t carryin,
+ size_t vl) {
+ return vmadc_vxm_u8m4_b2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadc_vv_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmadc_vx_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
+ vbool1_t carryin, size_t vl) {
+ return vmadc_vvm_u8m8_b1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t carryin,
+ size_t vl) {
+ return vmadc_vxm_u8m8_b1(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadc_vv_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmadc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmadc_vx_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vvm_u16mf4_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vxm_u16mf4_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmadc_vv_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmadc_vx_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vvm_u16mf2_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vxm_u16mf2_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmadc_vv_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmadc_vx_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vvm_u16m1_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vxm_u16m1_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadc_vv_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmadc_vx_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vvm_u16m2_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vxm_u16m2_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadc_vv_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmadc_vx_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc_vvm_u16m4_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc_vxm_u16m4_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadc_vv_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmadc_vx_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vmadc_vvm_u16m8_b2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
+ vbool2_t carryin, size_t vl) {
+ return vmadc_vxm_u16m8_b2(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadc_vv_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmadc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmadc_vx_u16m8_b2(op1, op2, vl);
+}
+
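(The bN suffix of the result mask type is the SEW/LMUL ratio, which is why the same vbool64_t pairs with u16mf4 above and with u32mf2 below: 16 / (1/4) = 64 and 32 / (1/2) = 64. Likewise u16m8 gives 16 / 8 = 2, hence vbool2_t.)
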
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vvm_u32mf2_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vxm_u32mf2_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmadc_vv_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmadc_vx_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vvm_u32m1_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vxm_u32m1_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadc_vv_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmadc_vx_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vvm_u32m2_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vxm_u32m2_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadc_vv_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmadc_vx_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vvm_u32m4_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vxm_u32m4_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadc_vv_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmadc_vx_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc_vvm_u32m8_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
+ vbool4_t carryin, size_t vl) {
+ return vmadc_vxm_u32m8_b4(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadc_vv_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmadc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmadc_vx_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vvm_u64m1_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
+ vbool64_t carryin, size_t vl) {
+ return vmadc_vxm_u64m1_b64(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadc_vv_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmadc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmadc_vx_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vvm_u64m2_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
+ vbool32_t carryin, size_t vl) {
+ return vmadc_vxm_u64m2_b32(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadc_vv_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmadc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmadc_vx_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vvm_u64m4_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
+ vbool16_t carryin, size_t vl) {
+ return vmadc_vxm_u64m4_b16(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadc_vv_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmadc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmadc_vx_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vvm_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vvm_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vvm_u64m8_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vxm_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vxm_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[CARRYIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
+ vbool8_t carryin, size_t vl) {
+ return vmadc_vxm_u64m8_b8(op1, op2, carryin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadc_vv_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadc_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadc_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmadc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmadc_vx_u64m8_b8(op1, op2, vl);
+}
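Before the next file, a minimal editorial sketch (not part of the patch) of the classic pairing these vmadc tests support: wide-word addition with carry propagation. It assumes the vadd_vv_u64m1 and vadc_vvm_u64m1 intrinsics covered by the vadd.c and vadc.c tests elsewhere in this patch.

  #include <riscv_vector.h>

  // Add two 128-bit values held as (lo, hi) limb vectors.
  void add128(vuint64m1_t a_lo, vuint64m1_t a_hi,
              vuint64m1_t b_lo, vuint64m1_t b_hi,
              vuint64m1_t *r_lo, vuint64m1_t *r_hi, size_t vl) {
    // Carry-out of the low limbs; vmadc_vv produces only the mask,
    // so it must be computed before (or independently of) the add.
    vbool64_t carry = vmadc_vv_u64m1_b64(a_lo, b_lo, vl);
    *r_lo = vadd_vv_u64m1(a_lo, b_lo, vl);
    // Fold the carry into the high limbs.
    *r_hi = vadc_vvm_u64m1(a_hi, b_hi, carry, vl);
  }
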
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
new file mode 100644
index 000000000000..8980828fb045
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
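As a reading aid for the checks that follow: per the RVV specification, vmadd overwrites the multiplicand, vd[i] = (vs1[i] * vd[i]) + vs2[i], i.e. acc = acc * op1 + op2 in the argument names used below. A scalar model of that semantics (editorial, not part of the test):

  #include <stddef.h>
  #include <stdint.h>

  // Illustrative scalar reference for the vmadd_vv_i8* intrinsics.
  static inline void vmadd_ref_i8(int8_t *acc, const int8_t *op1,
                                  const int8_t *op2, size_t vl) {
    for (size_t i = 0; i < vl; ++i)
      acc[i] = (int8_t)(acc[i] * op1[i] + op2[i]);
  }
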
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd_vv_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd_vx_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd_vv_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd_vx_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd_vv_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd_vx_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd_vv_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd_vx_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd_vv_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd_vx_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd_vv_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd_vx_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd_vv_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd_vx_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd_vx_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd_vv_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd_vx_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd_vv_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd_vx_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd_vv_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd_vx_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd_vv_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd_vx_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd_vv_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd_vx_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd_vv_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd_vx_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd_vv_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd_vx_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vx_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd_vv_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmadd_vx_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd_vv_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vmadd_vx_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd_vv_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vmadd_vx_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd_vv_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vmadd_vx_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd_vv_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vmadd_vx_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd_vv_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vmadd_vx_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd_vv_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vmadd_vx_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vmadd_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vmadd_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vmadd_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vmadd_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vmadd_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vmadd_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vmadd_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vmadd_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vmadd_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vmadd_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vmadd_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vmadd_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vmadd_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vmadd_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vmadd_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd_vv_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmadd_vx_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd_vv_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmadd_vx_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd_vv_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmadd_vx_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd_vv_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vmadd_vx_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd_vv_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vmadd_vx_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd_vv_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vmadd_vx_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd_vv_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmadd_vx_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmadd_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmadd_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vmadd_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vmadd_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vmadd_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vmadd_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmadd_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vmadd_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vmadd_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vmadd_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vmadd_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vmadd_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vmadd_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vmadd_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vmadd_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
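
A compact scalar model of the masked vmadd pattern exercised above can save readers from scanning every generated CHECK line; this is a minimal sketch, not code from the patch, and it assumes the mask-undisturbed behavior these masked intrinsics carried at the time (inactive lanes keep the old accumulator value). The helper name is invented for illustration.

#include <stddef.h>
#include <stdint.h>

/* Scalar model of vmadd_vx_u16*_m(mask, acc, op1, op2, vl): each active
   lane overwrites the accumulator with acc[i] * op1 + op2[i]; inactive
   lanes are left untouched (assumed mask-undisturbed policy). The
   uint32_t cast avoids promoted-int overflow in the product. */
static void vmadd_vx_u16_m_model(const uint8_t *mask, uint16_t *acc,
                                 uint16_t op1, const uint16_t *op2,
                                 size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    if (mask[i])
      acc[i] = (uint16_t)((uint32_t)acc[i] * op1 + op2[i]);
}

The _vv forms differ only in reading op1 per lane rather than as a splatted scalar, which is exactly the difference between the ...nxv1i16.nxv1i16... and ...nxv1i16.i16... intrinsic names in the CHECK lines above.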
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c
new file mode 100644
index 000000000000..f89808de7ff8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmand.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmand_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmand_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmand_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmand_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmand_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmand_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmand_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmand_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmand_mm_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmandnot_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmandnot_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmandnot_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmandnot_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmandnot_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmandnot_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmandnot_mm_b64(op1, op2, vl);
+}
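
A rough scalar model of the mask-register logic tested above, again a sketch rather than patch code: vmand_mm is a plain lane-wise AND, and for vmandnot_mm the operand order is assumed from the v-spec form vmandnot.mm vd, vs2, vs1 (the second intrinsic operand is the one complemented); treat that ordering as an assumption to verify against the spec.

#include <stdbool.h>
#include <stddef.h>

/* Lane-wise AND of two mask registers: vd[i] = op1[i] & op2[i]. */
static void vmand_mm_model(bool *vd, const bool *op1, const bool *op2,
                           size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    vd[i] = op1[i] && op2[i];
}

/* AND with complement; op1 & ~op2 is assumed here (see note above). */
static void vmandnot_mm_model(bool *vd, const bool *op1, const bool *op2,
                              size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    vd[i] = op1[i] && !op2[i];
}

The b1 through b64 suffixes only change how many mask bits one vector register covers; the per-lane semantics are identical, which is why the seven tests per intrinsic differ only in the nxv*i1 type in their CHECK lines.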
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
new file mode 100644
index 000000000000..11332ec8b118
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
@@ -0,0 +1,2473 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
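The roughly 2,470 lines of vmax checks in this file repeat one pattern for every element width and LMUL, so a scalar model up front summarizes them: vmax is the lane-wise signed maximum (its unsigned counterpart, vmaxu, is a separate intrinsic not shown in this file). A minimal sketch under that reading, with invented helper names:

#include <stddef.h>
#include <stdint.h>

/* vmax_vv: lane-wise signed maximum of two vectors. */
static void vmax_vv_i8_model(int8_t *vd, const int8_t *op1,
                             const int8_t *op2, size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    vd[i] = op1[i] > op2[i] ? op1[i] : op2[i];
}

/* vmax_vx: op2 is a scalar splatted across all lanes, which is why the
   CHECK lines show a plain i8 argument in place of a second
   <vscale x N x i8> vector operand. */
static void vmax_vx_i8_model(int8_t *vd, const int8_t *op1, int8_t op2,
                             size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    vd[i] = op1[i] > op2 ? op1[i] : op2;
}
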
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmax_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmax_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmax_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmax_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmax_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmax_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmax_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmax_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmax_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmax_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmax_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmax_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmax_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmax_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmax_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmax_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmax_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmax_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmax_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmax_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmax_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmax_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m8(op1, op2, vl);
+}
+
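+// Illustrative (not generated) usage sketch: the unmasked vv/vx overloads
+// checked above are typically consumed from a strip-mined loop like the one
+// below. It assumes the same intrinsic naming era as this test file
+// (vsetvl_e32m1, vle32_v_i32m1, vse32_v_i32m1); the function name
+// elementwise_max is hypothetical and is not part of the checked output.
+void elementwise_max(int32_t *dst, const int32_t *a, const int32_t *b,
+                     size_t n) {
+  for (size_t vl; n > 0; n -= vl, a += vl, b += vl, dst += vl) {
+    vl = vsetvl_e32m1(n);                 // elements handled this iteration
+    vint32m1_t va = vle32_v_i32m1(a, vl); // unit-stride loads
+    vint32m1_t vb = vle32_v_i32m1(b, vl);
+    vse32_v_i32m1(dst, vmax_vv_i32m1(va, vb, vl), vl); // signed max, store
+  }
+}
+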
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmaxu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmaxu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmaxu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmaxu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmaxu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmaxu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmaxu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmaxu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmaxu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmaxu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmaxu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmaxu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmaxu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmaxu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmaxu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmaxu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmaxu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmaxu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmaxu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmaxu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmaxu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmaxu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m8(op1, op2, vl);
+}
+
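+// Illustrative (not generated) usage sketch for the masked _m overloads
+// checked below: the vbool mask selects active lanes and maskedoff supplies
+// the result for inactive ones. It assumes the comparison intrinsic
+// vmsgt_vx_i8m1_b8 from the same naming scheme; clamp_positive_max is a
+// hypothetical helper, not part of the checked output.
+vint8m1_t clamp_positive_max(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  // Only lanes where op1 > 0 take max(op1, op2); other lanes keep op1.
+  vbool8_t positive = vmsgt_vx_i8m1_b8(op1, 0, vl);
+  return vmax_vv_i8m1_m(positive, op1, op1, op2, vl);
+}
+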
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmax_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmax_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmax_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmax_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmax_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmax_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmax_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmax.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmax_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmax_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmax.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmax_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmax.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmax_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmax.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmax_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmax.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmax_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmax.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmax_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmax.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmax_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmax_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmax.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmax_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmax.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmax_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmax.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmax_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmax.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmax_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmax.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmax_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmax_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmax.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmax_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmax.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmax_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmax.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmax_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmax_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmax.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmax_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmaxu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmaxu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmaxu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmaxu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmaxu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmaxu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmaxu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmaxu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmaxu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmaxu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmaxu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmaxu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmaxu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmaxu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmaxu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmaxu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmaxu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmaxu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmaxu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmaxu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmaxu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmaxu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
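+
+// Reviewer note, not part of the autogenerated checks: even on RV32, the vx
+// forms for 64-bit element types pass the scalar operand to the IR intrinsic
+// as a full i64 (only the trailing vl operand narrows to i32), leaving the
+// backend to legalize the 64-bit splat.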
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmaxu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmaxu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmaxu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmaxu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmaxu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
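+
+// Reviewer note, not part of the autogenerated checks: every masked test in
+// this file follows the same shape. The vboolN_t mask type satisfies
+// N = SEW / LMUL (u64m8 gives 64 / 8 = 8, hence vbool8_t), and the maskedoff
+// operand supplies the result value for inactive elements.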
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c
new file mode 100644
index 000000000000..70424925ba02
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmclr.c
@@ -0,0 +1,92 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
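+
+// Reviewer note, not part of the autogenerated checks: vmclr_m_bN returns an
+// all-zeros mask, so each wrapper below lowers to a single @llvm.riscv.vmclr.*
+// call whose only operand is vl. The bN suffix encodes the SEW/LMUL ratio,
+// which is why b1 maps to <vscale x 64 x i1> and b64 to <vscale x 1 x i1>.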
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmclr_m_b1(size_t vl) { return vmclr_m_b1(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmclr_m_b2(size_t vl) { return vmclr_m_b2(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmclr_m_b4(size_t vl) { return vmclr_m_b4(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmclr_m_b8(size_t vl) { return vmclr_m_b8(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmclr_m_b16(size_t vl) { return vmclr_m_b16(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmclr_m_b32(size_t vl) { return vmclr_m_b32(vl); }
+
+// CHECK-RV32-LABEL: @test_vmclr_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmclr_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmclr_m_b64(size_t vl) { return vmclr_m_b64(vl); }
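+
+// A minimal usage sketch (illustrative only, not exercised by the checks
+// above; it assumes the vmseq_vx_i8m1_b8 and vmor_mm_b8 intrinsics from the
+// same experimental-v intrinsic set): clear an accumulator mask, then OR
+// compare results into it.
+//
+//   vbool8_t acc = vmclr_m_b8(vl);              // all lanes start inactive
+//   vbool8_t eq  = vmseq_vx_i8m1_b8(v, 0, vl);  // lanes where v[i] == 0
+//   acc = vmor_mm_b8(acc, eq, vl);              // accumulate into acc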
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
new file mode 100644
index 000000000000..72dbfa7e58e1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmerge.c
@@ -0,0 +1,1466 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
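+
+// Reviewer note, not part of the autogenerated checks: vmerge_vvm/_vxm select
+// per element between two sources, result[i] = mask[i] ? op2[i] : op1[i],
+// with the _vxm forms splatting the scalar op2. Unlike the masked arithmetic
+// tests, there is no separate maskedoff operand; op1 supplies the inactive
+// lanes.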
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
+ size_t vl) {
+ return vmerge_vvm_i8mf8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge_vxm_i8mf8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
+ size_t vl) {
+ return vmerge_vvm_i8mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge_vxm_i8mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
+ size_t vl) {
+ return vmerge_vvm_i8mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge_vxm_i8mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2,
+ size_t vl) {
+ return vmerge_vvm_i8m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge_vxm_i8m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2,
+ size_t vl) {
+ return vmerge_vvm_i8m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge_vxm_i8m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2,
+ size_t vl) {
+ return vmerge_vvm_i8m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge_vxm_i8m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2,
+ size_t vl) {
+ return vmerge_vvm_i8m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2,
+ size_t vl) {
+ return vmerge_vxm_i8m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1,
+ vint16mf4_t op2, size_t vl) {
+ return vmerge_vvm_i16mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge_vxm_i16mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1,
+ vint16mf2_t op2, size_t vl) {
+ return vmerge_vvm_i16mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge_vxm_i16mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
+ size_t vl) {
+ return vmerge_vvm_i16m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge_vxm_i16m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
+ size_t vl) {
+ return vmerge_vvm_i16m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge_vxm_i16m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
+ size_t vl) {
+ return vmerge_vvm_i16m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge_vxm_i16m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
+ size_t vl) {
+ return vmerge_vvm_i16m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2,
+ size_t vl) {
+ return vmerge_vxm_i16m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1,
+ vint32mf2_t op2, size_t vl) {
+ return vmerge_vvm_i32mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge_vxm_i32mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
+ size_t vl) {
+ return vmerge_vvm_i32m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge_vxm_i32m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
+ size_t vl) {
+ return vmerge_vvm_i32m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge_vxm_i32m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
+ size_t vl) {
+ return vmerge_vvm_i32m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge_vxm_i32m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
+ size_t vl) {
+ return vmerge_vvm_i32m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2,
+ size_t vl) {
+ return vmerge_vxm_i32m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
+ size_t vl) {
+ return vmerge_vvm_i64m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2,
+ size_t vl) {
+ return vmerge_vxm_i64m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
+ size_t vl) {
+ return vmerge_vvm_i64m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2,
+ size_t vl) {
+ return vmerge_vxm_i64m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
+ size_t vl) {
+ return vmerge_vvm_i64m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2,
+ size_t vl) {
+ return vmerge_vxm_i64m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
+ size_t vl) {
+ return vmerge_vvm_i64m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2,
+ size_t vl) {
+ return vmerge_vxm_i64m8(mask, op1, op2, vl);
+}
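+
+// Reviewer note, not part of the autogenerated checks: the unsigned tests
+// below emit exactly the same @llvm.riscv.vmerge.* calls as the signed tests
+// above, since vmerge only moves bit patterns; the signedness lives solely in
+// the C-level types.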
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vmerge_vvm_u8mf8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge_vxm_u8mf8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vmerge_vvm_u8mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge_vxm_u8mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1,
+ vuint8mf2_t op2, size_t vl) {
+ return vmerge_vvm_u8mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge_vxm_u8mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2,
+ size_t vl) {
+ return vmerge_vvm_u8m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge_vxm_u8m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2,
+ size_t vl) {
+ return vmerge_vvm_u8m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge_vxm_u8m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2,
+ size_t vl) {
+ return vmerge_vvm_u8m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge_vxm_u8m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2,
+ size_t vl) {
+ return vmerge_vvm_u8m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2,
+ size_t vl) {
+ return vmerge_vxm_u8m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vmerge_vvm_u16mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1,
+ uint16_t op2, size_t vl) {
+ return vmerge_vxm_u16mf4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vmerge_vvm_u16mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1,
+ uint16_t op2, size_t vl) {
+ return vmerge_vxm_u16mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vmerge_vvm_u16m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
+ size_t vl) {
+ return vmerge_vxm_u16m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vmerge_vvm_u16m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmerge_vxm_u16m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vmerge_vvm_u16m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmerge_vxm_u16m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1,
+ vuint16m8_t op2, size_t vl) {
+ return vmerge_vvm_u16m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
+ size_t vl) {
+ return vmerge_vxm_u16m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vmerge_vvm_u32mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1,
+ uint32_t op2, size_t vl) {
+ return vmerge_vxm_u32mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vmerge_vvm_u32m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
+ size_t vl) {
+ return vmerge_vxm_u32m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vmerge_vvm_u32m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmerge_vxm_u32m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vmerge_vvm_u32m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
+ size_t vl) {
+ return vmerge_vxm_u32m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1,
+ vuint32m8_t op2, size_t vl) {
+ return vmerge_vvm_u32m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
+ size_t vl) {
+ return vmerge_vxm_u32m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1,
+ vuint64m1_t op2, size_t vl) {
+ return vmerge_vvm_u64m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
+ size_t vl) {
+ return vmerge_vxm_u64m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1,
+ vuint64m2_t op2, size_t vl) {
+ return vmerge_vvm_u64m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
+ size_t vl) {
+ return vmerge_vxm_u64m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1,
+ vuint64m4_t op2, size_t vl) {
+ return vmerge_vvm_u64m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
+ size_t vl) {
+ return vmerge_vxm_u64m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1,
+ vuint64m8_t op2, size_t vl) {
+ return vmerge_vvm_u64m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vxm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
+ size_t vl) {
+ return vmerge_vxm_u64m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1,
+ vfloat32mf2_t op2, size_t vl) {
+ return vmerge_vvm_f32mf2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1,
+ vfloat32m1_t op2, size_t vl) {
+ return vmerge_vvm_f32m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1,
+ vfloat32m2_t op2, size_t vl) {
+ return vmerge_vvm_f32m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1,
+ vfloat32m4_t op2, size_t vl) {
+ return vmerge_vvm_f32m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1,
+ vfloat32m8_t op2, size_t vl) {
+ return vmerge_vvm_f32m8(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1,
+ vfloat64m1_t op2, size_t vl) {
+ return vmerge_vvm_f64m1(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1,
+ vfloat64m2_t op2, size_t vl) {
+ return vmerge_vvm_f64m2(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1,
+ vfloat64m4_t op2, size_t vl) {
+ return vmerge_vvm_f64m4(mask, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmerge_vvm_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1,
+ vfloat64m8_t op2, size_t vl) {
+ return vmerge_vvm_f64m8(mask, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
new file mode 100644
index 000000000000..455cdb325136
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vmfeq_vv_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vmfeq_vv_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfeq_vv_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfeq_vf_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfeq_vv_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfeq_vf_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfeq_vv_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfeq_vf_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmfeq_vv_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfeq_vf_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vmfeq_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfeq_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfeq_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfeq_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfeq_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfeq_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfeq_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfeq_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vv_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vmfeq_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfeq_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfeq_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
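
The vmfeq.c tests above only pin down IR shapes; a minimal usage sketch in C (not part of the patch) shows the unmasked vmfeq_vv form these masked `_m` tests build on. It assumes the v0.10-era non-overloaded names from the same riscv_vector.h — vsetvl_e32m1, vle32_v_f32m1 and vpopc_m_b32 (renamed vcpop in later specs) — none of which appear in this hunk.

#include <riscv_vector.h>
#include <stddef.h>

// Sketch (not part of this patch): count pairwise-equal floats using the
// unmasked vmfeq_vv comparison; vsetvl_e32m1, vle32_v_f32m1 and
// vpopc_m_b32 are assumed to come from the same header.
size_t count_equal(const float *a, const float *b, size_t n) {
  size_t total = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);             // elements this iteration
    vfloat32m1_t va = vle32_v_f32m1(a + i, vl);  // unit-stride loads
    vfloat32m1_t vb = vle32_v_f32m1(b + i, vl);
    vbool32_t eq = vmfeq_vv_f32m1_b32(va, vb, vl);
    total += vpopc_m_b32(eq, vl);                // set bits in the mask
    i += vl;
  }
  return total;
}
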
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
new file mode 100644
index 000000000000..d25258997b52
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfge_vf_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfge_vf_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfge_vf_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfge_vf_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfge_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfge_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfge_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfge_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfge_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfge_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfge_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
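
The `_m` variants tested in vmfge.c take two extra operands: per the checked IR, `maskedoff` is the first intrinsic argument and `mask` the second-to-last. A hedged sketch of how that pairing can be used, calling only the signature exercised by test_vmfge_vf_f32m1_b32_m and assuming the usual RVV convention that inactive elements are carried over from maskedoff:

#include <riscv_vector.h>
#include <stddef.h>

// Sketch (not part of this patch): refine an existing mask so a bit stays
// set only where it was already set in `prior` and x[i] >= threshold.
vbool32_t refine_ge(vbool32_t prior, vfloat32m1_t x, float threshold,
                    size_t vl) {
  // Active lanes (prior[i] == 1) get the x[i] >= threshold result;
  // inactive lanes take their bit from maskedoff, which is also `prior`,
  // i.e. 0 -- so the result is prior AND (x >= threshold).
  return vmfge_vf_f32m1_b32_m(prior, prior, x, threshold, vl);
}
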
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
new file mode 100644
index 000000000000..12b39553d4fe
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfgt_vf_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfgt_vf_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfgt_vf_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfgt_vf_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfgt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfgt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfgt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfgt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfgt_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfgt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
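
vmfgt.c, like vmfge.c, covers only the vector-scalar form. One natural consumer pattern is a range test pairing vmfgt_vf with the vmfle_vf form from the next file; a minimal sketch, under the assumption that the v0.10-era mask-logical intrinsic vmand_mm_b32 (not shown anywhere in this diff) is available from the same header:

#include <riscv_vector.h>
#include <stddef.h>

// Sketch (not part of this patch): mask of elements with lo < x[i] <= hi.
vbool32_t in_range(vfloat32m1_t x, float lo, float hi, size_t vl) {
  vbool32_t gt_lo = vmfgt_vf_f32m1_b32(x, lo, vl);  // x >  lo
  vbool32_t le_hi = vmfle_vf_f32m1_b32(x, hi, vl);  // x <= hi
  return vmand_mm_b32(gt_lo, le_hi, vl);            // both conditions
}
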
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
new file mode 100644
index 000000000000..8ae45a4accf1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfle_vv_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfle_vv_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfle_vv_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vmfle_vv_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vmfle_vv_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfle_vv_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfle_vf_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfle_vv_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfle_vf_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfle_vv_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfle_vf_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmfle_vv_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfle_vf_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfle_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vv_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfle_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
new file mode 100644
index 000000000000..7b87bd985902
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
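+// Note on naming: vmflt_vv compares two vectors and vmflt_vf compares a
+// vector against a scalar; the trailing _b<N> is the mask ratio SEW/LMUL,
+// e.g. f32mf2 gives 32/(1/2) = 64 and therefore returns vbool64_t.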
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmflt_vv_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmflt_vv_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmflt_vv_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vmflt_vv_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vmflt_vv_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmflt_vv_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmflt_vf_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmflt_vv_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmflt_vf_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmflt_vv_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmflt_vf_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmflt_vv_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmflt_vf_f64m8_b8(op1, op2, vl);
+}
+
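+// The remaining tests cover the masked (_m) form of vmflt; inactive lanes
+// are filled from maskedoff, matching the vmfle tests above.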
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmflt_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmflt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmflt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vmflt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vmflt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmflt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmflt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmflt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmflt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmflt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmflt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmflt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vv_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vmflt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmflt_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmflt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
new file mode 100644
index 000000000000..bbfe91c5c80f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c
@@ -0,0 +1,548 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
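+// vmfne is the element-wise floating-point not-equal compare; the test grid
+// below walks the same LMUL and element-width combinations as vmflt.c.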
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfne_vv_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32.i32(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f32m1_b32(vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfne_vv_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32.i32(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32.i64(<vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f32m1_b32(vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f32m2_b16(vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfne_vv_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32.i32(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32.i64(<vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f32m2_b16(vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f32m4_b8(vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+ return vmfne_vv_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32.i32(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32.i64(<vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f32m4_b8(vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_f32m8_b4(vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+ return vmfne_vv_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.f32.i32(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f32.f32.i64(<vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_f32m8_b4(vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f64m1_b64(vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfne_vv_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64.i32(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64.i64(<vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f64m1_b64(vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfne_vf_f64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f64m2_b32(vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfne_vv_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64.i32(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64.i64(<vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f64m2_b32(vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfne_vf_f64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f64m4_b16(vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfne_vv_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64.i32(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64.i64(<vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f64m4_b16(vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfne_vf_f64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f64m8_b8(vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return vmfne_vv_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.f64.i32(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfne_vf_f64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, vfloat32mf2_t op2,
+ size_t vl) {
+ return vmfne_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat32mf2_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, vfloat32m1_t op2,
+ size_t vl) {
+ return vmfne_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat32m1_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, vfloat32m2_t op2,
+ size_t vl) {
+ return vmfne_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat32m2_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, vfloat32m4_t op2,
+ size_t vl) {
+ return vmfne_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat32m4_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, vfloat32m8_t op2,
+ size_t vl) {
+ return vmfne_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.f32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vfloat32m8_t op1, float op2, size_t vl) {
+ return vmfne_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, vfloat64m1_t op2,
+ size_t vl) {
+ return vmfne_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vfloat64m1_t op1, double op2, size_t vl) {
+ return vmfne_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, vfloat64m2_t op2,
+ size_t vl) {
+ return vmfne_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vfloat64m2_t op1, double op2, size_t vl) {
+ return vmfne_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, vfloat64m4_t op2,
+ size_t vl) {
+ return vmfne_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vfloat64m4_t op1, double op2, size_t vl) {
+ return vmfne_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vv_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, vfloat64m8_t op2,
+ size_t vl) {
+ return vmfne_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmfne_vf_f64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.f64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vfloat64m8_t op1, double op2, size_t vl) {
+ return vmfne_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
new file mode 100644
index 000000000000..7af09cd98de6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmin_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmin_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmin_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmin_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmin_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmin_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmin_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmin_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmin_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmin_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmin_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmin_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmin_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmin_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmin_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmin_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmin_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmin_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmin_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmin_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmin_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmin_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vminu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vminu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vminu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vminu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vminu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vminu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vminu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vminu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vminu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vminu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vminu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vminu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vminu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vminu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vminu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vminu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vminu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vminu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vminu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vminu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vminu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vminu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmin_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmin_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmin.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmin_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmin.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmin_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmin.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmin_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmin.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmin_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmin.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmin_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmin.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmin_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmin_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmin.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmin_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmin.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmin_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmin.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmin_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmin.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmin_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmin.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmin_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmin.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmin_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmin_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmin.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmin_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmin.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmin_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmin.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmin_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmin.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmin_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmin.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmin_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmin_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmin.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmin_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmin.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmin_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmin.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmin_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmin_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmin.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmin_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
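+// The vminu_* tests below mirror the vmin_* tests above; the semantic
+// difference is that vmin compares lanes as signed integers while vminu
+// compares them as unsigned, which is why the operand and result types
+// switch to the vuint*_t / uint*_t families.
+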
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vminu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vminu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vminu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vminu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vminu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vminu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vminu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vminu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vminu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vminu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vminu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vminu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vminu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vminu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vminu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vminu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vminu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vminu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vminu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vminu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vminu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vminu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vminu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vminu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vminu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vminu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vminu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vminu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vminu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vminu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vminu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vminu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vminu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vminu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vminu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vminu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vminu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vminu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vminu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vminu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vminu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vminu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vminu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vminu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vminu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vminu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vminu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vminu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vminu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c
new file mode 100644
index 000000000000..c7fd8f589f92
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnand.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
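+
+// The two IR RUN lines above compile for riscv32 and riscv64 and FileCheck
+// the output after mem2reg (the CHECK-RV32/CHECK-RV64 prefixes); the third
+// RUN line builds straight to assembly with -Werror -Wall, so the test also
+// confirms these intrinsic calls compile without any diagnostics.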
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnand.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmnand_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmnand_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnand.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmnand_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmnand_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnand.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmnand_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmnand_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnand.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmnand_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmnand_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnand.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmnand_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmnand_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmnand_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmnand_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnand_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnand_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmnand_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmnand_mm_b64(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c
new file mode 100644
index 000000000000..2641e55e04c6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmnor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmnor_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmnor_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmnor_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmnor_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmnor_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmnor_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmnor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmnor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmnor_mm_b64(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c
new file mode 100644
index 000000000000..c6c6e7a5ba93
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmor.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmor_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmor_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmor_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmor_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmor_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmor_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmor_mm_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmornot_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmornot_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmornot_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmornot_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmornot_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmornot_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmornot_mm_b64(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c
new file mode 100644
index 000000000000..774de45dabf7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbc.c
@@ -0,0 +1,2566 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vvm_i8mf8_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_i8mf8_b64(vint8mf8_t op1, int8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vxm_i8mf8_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsbc_vv_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsbc_vx_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vvm_i8mf4_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_i8mf4_b32(vint8mf4_t op1, int8_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vxm_i8mf4_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsbc_vv_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsbc_vx_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vvm_i8mf2_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_i8mf2_b16(vint8mf2_t op1, int8_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vxm_i8mf2_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsbc_vv_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsbc_vx_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_i8m1_b8(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc_vvm_i8m1_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_i8m1_b8(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i8m1_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsbc_vv_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsbc_vx_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_i8m2_b4(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc_vvm_i8m2_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_i8m2_b4(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i8m2_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsbc_vv_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsbc_vx_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vvm_i8m4_b2(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vmsbc_vvm_i8m4_b2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vxm_i8m4_b2(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i8m4_b2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsbc_vv_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsbc_vx_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vvm_i8m8_b1(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vmsbc_vvm_i8m8_b1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vxm_i8m8_b1(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i8m8_b1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsbc_vv_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsbc_vx_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vvm_i16mf4_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_i16mf4_b64(vint16mf4_t op1, int16_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vxm_i16mf4_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsbc_vv_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsbc_vx_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vvm_i16mf2_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_i16mf2_b32(vint16mf2_t op1, int16_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vxm_i16mf2_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsbc_vv_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsbc_vx_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_i16m1_b16(vint16m1_t op1, vint16m1_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vvm_i16m1_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_i16m1_b16(vint16m1_t op1, int16_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vxm_i16m1_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsbc_vv_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsbc_vx_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_i16m2_b8(vint16m2_t op1, vint16m2_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vvm_i16m2_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_i16m2_b8(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i16m2_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsbc_vv_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsbc_vx_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_i16m4_b4(vint16m4_t op1, vint16m4_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc_vvm_i16m4_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_i16m4_b4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i16m4_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsbc_vv_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsbc_vx_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vvm_i16m8_b2(vint16m8_t op1, vint16m8_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vmsbc_vvm_i16m8_b2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vxm_i16m8_b2(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i16m8_b2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsbc_vv_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsbc_vx_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vvm_i32mf2_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_i32mf2_b64(vint32mf2_t op1, int32_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vxm_i32mf2_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsbc_vv_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsbc_vx_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_i32m1_b32(vint32m1_t op1, vint32m1_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vvm_i32m1_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_i32m1_b32(vint32m1_t op1, int32_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vxm_i32m1_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsbc_vv_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsbc_vx_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_i32m2_b16(vint32m2_t op1, vint32m2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vvm_i32m2_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_i32m2_b16(vint32m2_t op1, int32_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vxm_i32m2_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsbc_vv_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsbc_vx_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_i32m4_b8(vint32m4_t op1, vint32m4_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vvm_i32m4_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_i32m4_b8(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i32m4_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsbc_vv_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsbc_vx_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_i32m8_b4(vint32m8_t op1, vint32m8_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc_vvm_i32m8_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_i32m8_b4(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i32m8_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsbc_vv_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsbc_vx_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_i64m1_b64(vint64m1_t op1, vint64m1_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vvm_i64m1_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_i64m1_b64(vint64m1_t op1, int64_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vxm_i64m1_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsbc_vv_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsbc_vx_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_i64m2_b32(vint64m2_t op1, vint64m2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vvm_i64m2_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_i64m2_b32(vint64m2_t op1, int64_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vxm_i64m2_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsbc_vv_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsbc_vx_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_i64m4_b16(vint64m4_t op1, vint64m4_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vvm_i64m4_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_i64m4_b16(vint64m4_t op1, int64_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vxm_i64m4_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsbc_vv_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsbc_vx_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_i64m8_b8(vint64m8_t op1, vint64m8_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vvm_i64m8_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_i64m8_b8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_i64m8_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsbc_vv_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsbc_vx_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vvm_u8mf8_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_u8mf8_b64(vuint8mf8_t op1, uint8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vxm_u8mf8_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmsbc_vv_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsbc_vx_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vvm_u8mf4_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_u8mf4_b32(vuint8mf4_t op1, uint8_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vxm_u8mf4_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmsbc_vv_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsbc_vx_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vvm_u8mf2_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_u8mf2_b16(vuint8mf2_t op1, uint8_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vxm_u8mf2_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmsbc_vv_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsbc_vx_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vvm_u8m1_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_u8m1_b8(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_u8m1_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsbc_vv_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsbc_vx_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc_vvm_u8m2_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_u8m2_b4(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_u8m2_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsbc_vv_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsbc_vx_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vvm_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vmsbc_vvm_u8m4_b2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vxm_u8m4_b2(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_u8m4_b2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsbc_vv_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsbc_vx_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vvm_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2,
+ vbool1_t borrowin, size_t vl) {
+ return vmsbc_vvm_u8m8_b1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vxm_u8m8_b1(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vmsbc_vxm_u8m8_b1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsbc_vv_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbc_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsbc_vx_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vvm_u16mf4_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_u16mf4_b64(vuint16mf4_t op1, uint16_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vxm_u16mf4_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsbc_vv_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsbc_vx_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vvm_u16mf2_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_u16mf2_b32(vuint16mf2_t op1, uint16_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vxm_u16mf2_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsbc_vv_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsbc_vx_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vvm_u16m1_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_u16m1_b16(vuint16m1_t op1, uint16_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vxm_u16m1_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmsbc_vv_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsbc_vx_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vvm_u16m2_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_u16m2_b8(vuint16m2_t op1, uint16_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vxm_u16m2_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsbc_vv_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsbc_vx_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc_vvm_u16m4_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_u16m4_b4(vuint16m4_t op1, uint16_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc_vxm_u16m4_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsbc_vv_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsbc_vx_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vvm_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vmsbc_vvm_u16m8_b2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vxm_u16m8_b2(vuint16m8_t op1, uint16_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vmsbc_vxm_u16m8_b2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsbc_vv_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbc_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsbc_vx_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vvm_u32mf2_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_u32mf2_b64(vuint32mf2_t op1, uint32_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vxm_u32mf2_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsbc_vv_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsbc_vx_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vvm_u32m1_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_u32m1_b32(vuint32m1_t op1, uint32_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vxm_u32m1_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmsbc_vv_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsbc_vx_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vvm_u32m2_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_u32m2_b16(vuint32m2_t op1, uint32_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vxm_u32m2_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmsbc_vv_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsbc_vx_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vvm_u32m4_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_u32m4_b8(vuint32m4_t op1, uint32_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vxm_u32m4_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsbc_vv_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsbc_vx_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vvm_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc_vvm_u32m8_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vxm_u32m8_b4(vuint32m8_t op1, uint32_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vmsbc_vxm_u32m8_b4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsbc_vv_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbc_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsbc_vx_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vvm_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vvm_u64m1_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vxm_u64m1_b64(vuint64m1_t op1, uint64_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vmsbc_vxm_u64m1_b64(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmsbc_vv_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbc_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsbc_vx_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vvm_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vvm_u64m2_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vxm_u64m2_b32(vuint64m2_t op1, uint64_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vmsbc_vxm_u64m2_b32(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmsbc_vv_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbc_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsbc_vx_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vvm_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vvm_u64m4_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vxm_u64m4_b16(vuint64m4_t op1, uint64_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vmsbc_vxm_u64m4_b16(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmsbc_vv_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbc_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsbc_vx_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vvm_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vvm_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vvm_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vvm_u64m8_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vxm_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vxm_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vxm_u64m8_b8(vuint64m8_t op1, uint64_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vmsbc_vxm_u64m8_b8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsbc_vv_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbc_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbc_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbc_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsbc_vx_u64m8_b8(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
new file mode 100644
index 000000000000..df0afc96309f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
@@ -0,0 +1,211 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbf_m_b1(vbool1_t op1, size_t vl) {
+ return vmsbf_m_b1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbf_m_b2(vbool2_t op1, size_t vl) {
+ return vmsbf_m_b2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4(vbool4_t op1, size_t vl) {
+ return vmsbf_m_b4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) {
+ return vmsbf_m_b8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbf_m_b16(vbool16_t op1, size_t vl) {
+ return vmsbf_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbf_m_b32(vbool32_t op1, size_t vl) {
+ return vmsbf_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) {
+ return vmsbf_m_b64(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsbf_m_b1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsbf_m_b2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsbf_m_b4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsbf_m_b8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsbf_m_b16_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsbf_m_b32_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsbf_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsbf_m_b64_m(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
new file mode 100644
index 000000000000..dd581dc4aa6b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmseq.c
@@ -0,0 +1,2584 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmseq_vv_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmseq_vv_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmseq_vv_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmseq_vv_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmseq_vv_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmseq_vv_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmseq_vv_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmseq_vv_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmseq_vv_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmseq_vv_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmseq_vv_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmseq_vv_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmseq_vv_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmseq_vv_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmseq_vv_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmseq_vv_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmseq_vv_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmseq_vx_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmseq_vv_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmseq_vx_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmseq_vv_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmseq_vx_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmseq_vv_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmseq_vx_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmseq_vv_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmseq_vv_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmseq_vv_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmseq_vv_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmseq_vv_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmseq_vv_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmseq_vv_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmseq_vv_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmseq_vv_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmseq_vv_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmseq_vv_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmseq_vv_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmseq_vv_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmseq_vv_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmseq_vv_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmseq_vv_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmseq_vv_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmseq_vx_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmseq_vv_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmseq_vx_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmseq_vv_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmseq_vx_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmseq_vv_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmseq_vx_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmseq_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmseq_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmseq_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmseq_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmseq_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmseq_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmseq_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmseq_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmseq_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmseq_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmseq_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmseq_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmseq_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmseq_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmseq_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmseq_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmseq_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmseq_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmseq_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmseq_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmseq_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmseq_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmseq_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmseq_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmseq_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmseq_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmseq_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmseq_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmseq_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmseq_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmseq_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmseq_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmseq_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmseq.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmseq_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmseq_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmseq_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmseq_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmseq_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmseq_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmseq_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmseq_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmseq_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmseq_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmseq_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmseq_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmseq_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmseq_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmseq_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmseq_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmseq_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmseq_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmseq_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmseq_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmseq_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmseq_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmseq_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmseq_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmseq_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmseq_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c
new file mode 100644
index 000000000000..bb61dfe7c8fa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmset.c
@@ -0,0 +1,92 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmset_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmset_m_b1(size_t vl) { return vmset_m_b1(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmset_m_b2(size_t vl) { return vmset_m_b2(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmset_m_b4(size_t vl) { return vmset_m_b4(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmset_m_b8(size_t vl) { return vmset_m_b8(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmset_m_b16(size_t vl) { return vmset_m_b16(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmset_m_b32(size_t vl) { return vmset_m_b32(vl); }
+
+// CHECK-RV32-LABEL: @test_vmset_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1.i32(i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmset_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmset_m_b64(size_t vl) { return vmset_m_b64(vl); }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
new file mode 100644
index 000000000000..4fcac2adb94b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsgt.c
@@ -0,0 +1,1287 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsgt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsgt_vx_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsgt_vx_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsgt_vx_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsgt_vx_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgtu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsgtu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgtu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsgt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsgt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsgt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsgt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsgt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsgt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgt_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgt_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsgt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsgtu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsgtu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsgtu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsgtu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsgtu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmsgtu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsgtu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsgtu_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsgtu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
new file mode 100644
index 000000000000..f3ad1de79f2c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
@@ -0,0 +1,211 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsif_m_b1(vbool1_t op1, size_t vl) {
+ return vmsif_m_b1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsif_m_b2(vbool2_t op1, size_t vl) {
+ return vmsif_m_b2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4(vbool4_t op1, size_t vl) {
+ return vmsif_m_b4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) {
+ return vmsif_m_b8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsif_m_b16(vbool16_t op1, size_t vl) {
+ return vmsif_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsif_m_b32(vbool32_t op1, size_t vl) {
+ return vmsif_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) {
+ return vmsif_m_b64(op1, vl);
+}
+
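+// The `_m`-suffixed variants below are the masked forms of vmsif: `mask`
+// selects the active elements, `maskedoff` supplies the result for inactive
+// elements, and the emitted IR switches to the `.mask.` intrinsic carrying
+// both extra mask operands ahead of `vl`.
+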
+// CHECK-RV32-LABEL: @test_vmsif_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsif_m_b1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsif_m_b2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsif_m_b4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsif_m_b8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsif_m_b16_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsif_m_b32_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsif_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsif_m_b64_m(mask, maskedoff, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
new file mode 100644
index 000000000000..1959f490357a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsle.c
@@ -0,0 +1,2599 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
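+
+// Naming scheme for the tests below: vmsle{u}_{vv|vx}_<type><lmul>_b<n>.
+// `vv` compares two vectors, `vx` compares a vector against a scalar;
+// e.g. `i8mf8` means int8 elements at LMUL=1/8, and the `b<n>` suffix names
+// the vboolN_t result mask, where each mask bit covers N bits of element
+// data (SEW/LMUL = N). The RV32 and RV64 check lines differ only in the
+// XLEN-sized `vl` operand (i32 vs. i64).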
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsle_vv_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsle_vv_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsle_vv_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsle_vv_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsle_vv_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsle_vv_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsle_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsle_vv_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsle_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsle_vv_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsle_vv_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsle_vv_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsle_vv_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsle_vv_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsle_vv_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsle_vv_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsle_vv_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsle_vv_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsle_vv_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsle_vv_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsle_vv_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsle_vv_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsle_vv_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsle_vv_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m8_b8(op1, op2, vl);
+}
+
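+// vmsleu is the unsigned counterpart of vmsle: the tests have the same shape,
+// but operate on vuint*/uint*_t types and lower to @llvm.riscv.vmsleu.
+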
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsleu_vv_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsleu_vv_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsleu_vv_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsleu_vv_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsleu_vv_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsleu_vv_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsleu_vv_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsleu_vv_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsleu_vv_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsleu_vv_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsleu_vv_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsleu_vv_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsleu_vv_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsleu_vx_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsleu_vx_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsleu_vv_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsleu_vx_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsleu_vv_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsleu_vx_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsle_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsle_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsle_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsle_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsle_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsle_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsle_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsle_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsle.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsle_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsle_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsle_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsle_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsle_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsle_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsle_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsle_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsle_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsle_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsle_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsle_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsle_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsle_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsle_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsle_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsle_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsle_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsle_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsle_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsle_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsle_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsle_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vv_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vv_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsle_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsle_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsle_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsle_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsle_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsleu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsleu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsleu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsleu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsleu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsleu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsleu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsleu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsleu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsleu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsleu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vmsleu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsleu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsleu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmsleu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsleu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vmsleu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vmsleu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsleu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsleu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsleu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsleu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsleu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsleu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsleu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsleu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsleu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsleu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsleu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vmsleu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsleu_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsleu_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsleu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
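The masked vmsleu tests above and the unmasked vmslt tests in the file that follows pin down the same calling pattern from opposite ends: the unmasked form takes only (op1, op2, vl) and produces a fresh mask, while the _m form prepends (mask, maskedoff, ...) and leaves inactive lanes holding the maskedoff bits. The sketch below is illustrative only and not part of the patch; the helper name and scenario are invented, but the intrinsic names, argument order, and types are exactly those exercised by the generated checks on either side of this point, under the same <riscv_vector.h> and -target-feature +experimental-v setup as the RUN lines.

    #include <riscv_vector.h>

    // Hypothetical helper: mark the lanes of `s` that are strictly negative,
    // then, only in those lanes, test whether the unsigned value in `u` is
    // <= limit. Lanes where `neg` is clear keep the bits already in `acc`,
    // which is passed as the maskedoff operand of the _m intrinsic.
    vbool8_t below_zero_and_small(vint8m1_t s, vuint8m1_t u, uint8_t limit,
                                  vbool8_t acc, size_t vl) {
      vbool8_t neg = vmslt_vx_i8m1_b8(s, 0, vl);           // unmasked signed compare
      return vmsleu_vx_u8m1_b8_m(neg, acc, u, limit, vl);  // masked unsigned compare
    }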
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
new file mode 100644
index 000000000000..c872d30dabd9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmslt.c
@@ -0,0 +1,2599 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmslt_vv_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmslt_vv_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmslt_vv_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmslt_vv_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmslt_vv_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmslt_vv_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmslt_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmslt_vv_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmslt_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmslt_vv_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmslt_vv_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmslt_vv_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmslt_vv_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmslt_vv_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmslt_vv_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+                                   size_t vl) {
+ return vmslt_vv_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmslt_vv_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmslt_vv_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmslt_vv_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmslt_vv_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmslt_vv_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmslt_vx_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmslt_vv_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmslt_vx_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmslt_vv_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmslt_vx_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmslt_vv_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmslt_vx_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsltu_vv_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsltu_vv_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsltu_vv_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsltu_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsltu_vv_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsltu_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+                                    size_t vl) {
+ return vmsltu_vv_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+                                    size_t vl) {
+ return vmsltu_vv_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsltu_vv_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsltu_vv_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsltu_vv_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+                                    size_t vl) {
+ return vmsltu_vv_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsltu_vv_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsltu_vv_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2,
+                                   size_t vl) {
+ return vmsltu_vv_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsltu_vv_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                    vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmslt_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+                                    vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                    vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmslt_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+                                    vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                    vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmslt_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+                                    vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                 vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmslt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+                                 vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+                                 vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmslt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmslt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmslt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmslt.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmslt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmslt_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmslt_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmslt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmslt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmslt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmslt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmslt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmslt_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmslt_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmslt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmslt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmslt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmslt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmslt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmslt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmslt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmslt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmslt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmslt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vv_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vv_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmslt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmslt_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmslt_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmslt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsltu_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsltu_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsltu_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsltu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsltu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsltu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsltu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsltu.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsltu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsltu_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsltu_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsltu_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsltu_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsltu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vmsltu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vmsltu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vmsltu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsltu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsltu_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmsltu_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsltu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsltu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vmsltu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vmsltu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsltu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsltu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsltu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsltu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vmsltu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsltu_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsltu_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsltu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
new file mode 100644
index 000000000000..432c80994608
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsne.c
@@ -0,0 +1,2584 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i8mf8_b64(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsne_vv_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i8mf8_b64(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i8mf4_b32(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsne_vv_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i8mf4_b32(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i8mf2_b16(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsne_vv_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i8mf2_b16(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i8m1_b8(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsne_vv_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i8m1_b8(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i8m2_b4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsne_vv_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i8m2_b4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_i8m4_b2(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsne_vv_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_i8m4_b2(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vv_i8m8_b1(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsne_vv_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vx_i8m8_b1(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i16mf4_b64(vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsne_vv_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i16mf4_b64(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i16mf2_b32(vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i16mf2_b32(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i16m1_b16(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsne_vv_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i16m1_b16(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i16m2_b8(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsne_vv_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i16m2_b8(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i16m4_b4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsne_vv_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i16m4_b4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_i16m8_b2(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsne_vv_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_i16m8_b2(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i32mf2_b64(vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i32mf2_b64(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i32m1_b32(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsne_vv_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i32m1_b32(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i32m2_b16(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsne_vv_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i32m2_b16(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i32m4_b8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsne_vv_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i32m4_b8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i32m8_b4(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsne_vv_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i32m8_b4(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i64m1_b64(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsne_vv_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i64m1_b64(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsne_vx_i64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i64m2_b32(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsne_vv_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i64m2_b32(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsne_vx_i64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i64m4_b16(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsne_vv_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i64m4_b16(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsne_vx_i64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i64m8_b8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsne_vv_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsne_vx_i64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u8mf8_b64(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmsne_vv_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf8_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u8mf8_b64(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8mf8_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u8mf4_b32(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmsne_vv_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf4_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u8mf4_b32(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8mf4_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u8mf2_b16(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmsne_vv_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u8mf2_b16(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8mf2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u8m1_b8(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsne_vv_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m1_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u8m1_b8(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8m1_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u8m2_b4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsne_vv_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m2_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u8m2_b4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8m2_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_u8m4_b2(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsne_vv_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m4_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_u8m4_b2(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8m4_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vv_u8m8_b1(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsne_vv_u8m8_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m8_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vx_u8m8_b1(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8m8_b1(op1, op2, vl);
+}
+
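+// The IR intrinsic name encodes the operand types: in
+// @llvm.riscv.vmsne.nxv8i8.i8.i32 the first suffix is the op1 vector type,
+// the second the op2 type (a vector for _vv_, a scalar for _vx_), and the
+// last the XLEN integer type carrying vl (i32 on RV32, i64 on RV64).
+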
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u16mf4_b64(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsne_vv_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16mf4_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u16mf4_b64(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u16mf2_b32(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16mf2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u16mf2_b32(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u16m1_b16(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmsne_vv_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m1_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u16m1_b16(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u16m2_b8(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsne_vv_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m2_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u16m2_b8(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u16m4_b4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsne_vv_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m4_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u16m4_b4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_u16m8_b2(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsne_vv_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m8_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_u16m8_b2(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u32mf2_b64(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32mf2_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u32mf2_b64(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32mf2_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u32m1_b32(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmsne_vv_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m1_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u32m1_b32(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32m1_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u32m2_b16(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmsne_vv_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m2_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u32m2_b16(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32m2_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u32m4_b8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsne_vv_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m4_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u32m4_b8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32m4_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u32m8_b4(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsne_vv_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m8_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u32m8_b4(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32m8_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u64m1_b64(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmsne_vv_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m1_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u64m1_b64(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsne_vx_u64m1_b64(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u64m2_b32(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmsne_vv_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m2_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u64m2_b32(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsne_vx_u64m2_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u64m4_b16(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmsne_vv_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m4_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u64m4_b16(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsne_vx_u64m4_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u64m8_b8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsne_vv_u64m8_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m8_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsne_vx_u64m8_b8(op1, op2, vl);
+}
+
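+// The _m tests below cover the masked variants. The IR call gains a .mask.
+// infix plus maskedoff and mask operands; per the masked-intrinsic
+// convention, elements whose mask bit is clear are taken from maskedoff.
+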
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmsne_vv_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmsne_vv_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmsne_vv_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmsne_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmsne_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmsne_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmsne_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmsne_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vmsne_vv_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmsne_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmsne_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmsne_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmsne_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmsne_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmsne_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmsne_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmsne_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmsne_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmsne_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmsne_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmsne_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmsne_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmsne_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmsne_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmsne_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmsne_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_i64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_i64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmsne_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
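+// Note: in the intrinsic names used here, the bN result mask type follows
+// N = SEW / LMUL (e.g. vmsne_vv_i32m2_b16: 32 / 2 = 16; u8mf8: 8 / (1/8) = 64),
+// and the _vv/_vx suffixes select the vector-vector and vector-scalar
+// comparison forms.
+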
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vmsne_vv_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf8_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf8_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u8mf8_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8mf8_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vmsne_vv_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf4_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf4_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u8mf4_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8mf4_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8mf2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8mf2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u8mf2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8mf2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmsne_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m1_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m1_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmsne_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m2_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m2_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmsne_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m4_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m4_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmsne_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u8m8_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u8m8_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsne.mask.nxv64i8.i8.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmsne_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vmsne_vv_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16mf4_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf4_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsne_vx_u16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16mf2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16mf2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vmsne_vx_u16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vmsne_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m1_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m1_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmsne_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m2_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m2_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmsne_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m4_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m4_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmsne_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u16m8_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u16m8_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i16.i16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmsne_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vmsne_vv_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32mf2_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vmsne_vx_u32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vmsne_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m1_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m1_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vmsne_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m2_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m2_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmsne_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m4_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m4_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmsne_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u32m8_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u32m8_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i32.i32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmsne_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vmsne_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m1_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m1_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmsne_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vmsne_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m2_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m2_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmsne_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vmsne_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m4_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m4_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmsne_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vv_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vv_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmsne_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsne_vx_u64m8_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsne_vx_u64m8_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i64.i64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmsne_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
new file mode 100644
index 000000000000..ccc66d48bdf5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
@@ -0,0 +1,211 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
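+
+// vmsof.m ("set-only-first") produces a mask in which at most one element is
+// set: the one corresponding to the lowest-numbered set bit of the source
+// mask. All other elements are cleared.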
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsof_m_b1(vbool1_t op1, size_t vl) {
+ return vmsof_m_b1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsof_m_b2(vbool2_t op1, size_t vl) {
+ return vmsof_m_b2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4(vbool4_t op1, size_t vl) {
+ return vmsof_m_b4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) {
+ return vmsof_m_b8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsof_m_b16(vbool16_t op1, size_t vl) {
+ return vmsof_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsof_m_b32(vbool32_t op1, size_t vl) {
+ return vmsof_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) {
+ return vmsof_m_b64(op1, vl);
+}
+
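+// The masked variants below take the mask first, plus a maskedoff operand
+// whose elements are carried through for inactive positions.
+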
+// CHECK-RV32-LABEL: @test_vmsof_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i32(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
+ size_t vl) {
+ return vmsof_m_b1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i32(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
+ size_t vl) {
+ return vmsof_m_b2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i32(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
+ size_t vl) {
+ return vmsof_m_b4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i32(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
+ size_t vl) {
+ return vmsof_m_b8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i32(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
+ size_t vl) {
+ return vmsof_m_b16_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i32(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
+ size_t vl) {
+ return vmsof_m_b32_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmsof_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i32(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
+ size_t vl) {
+ return vmsof_m_b64_m(mask, maskedoff, op1, vl);
+}
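+
+// A note on the masked variants above: the C-level helpers take
+// (mask, maskedoff, op1, vl), while the generated call passes the
+// operands in the intrinsic's order (maskedoff, op1, mask, vl), as the
+// CHECK lines show. vmsof.m itself produces a mask with only the first
+// set bit of op1 kept; the tests here pin down the lowering, not the
+// semantics.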
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
new file mode 100644
index 000000000000..165bd1a12c1d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
@@ -0,0 +1,6168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
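+
+// The tests below follow one pattern per element type and LMUL: a
+// vector-vector form (vmul_vv_*) and a vector-scalar form (vmul_vx_*),
+// each checked against the llvm.riscv.vmul.* intrinsic it should lower
+// to on both RV32 and RV64 (the XLEN-sized type of the vl operand is
+// the only difference between the two CHECK prefixes). In the vx form
+// the scalar operand's width matches the element width; as a sketch, a
+// hypothetical caller might write:
+//
+//   vint32m1_t scale(vint32m1_t v, int32_t k, size_t vl) {
+//     return vmul_vx_i32m1(v, k, vl); // llvm.riscv.vmul.nxv2i32.i32
+//   }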
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmul_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmul_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmul_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmul_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmul_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmul_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmul_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmul_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmul_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmul_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmul_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmul_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmul_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmul_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmul_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmul_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmul_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmul_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmul_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmul_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmul_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmul_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m8(op1, op2, vl);
+}
+
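+// The unsigned tests below mirror the signed ones: vmul computes the
+// low half of the product, which is sign-agnostic, so vmul_vv_u* and
+// vmul_vx_u* lower to the same llvm.riscv.vmul.* intrinsics as the
+// signed i* forms (compare the CHECK lines above and below).
+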
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmul_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmul_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmul_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmul_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmul_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmul_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmul_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmul_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmul_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmul_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmul_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmul_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmul_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmul_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmul_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmul_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmul_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmul_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmul_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmul_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmul_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmul_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m8(op1, op2, vl);
+}
+
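+// Note on semantics (an editorial sketch, not part of the generated checks):
+// the vmul tests above exercise the low-half multiply, while the vmulh tests
+// below exercise the signed high-half multiply, which keeps the upper SEW
+// bits of the 2*SEW-bit signed product. Assuming <stdint.h> and arithmetic
+// right shift, a hypothetical scalar reference for SEW=8 would be:
+//   static inline int8_t ref_mulh_i8(int8_t a, int8_t b) {
+//     return (int8_t)(((int16_t)a * (int16_t)b) >> 8); // high byte of product
+//   }
+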
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmulh_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmulh_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmulh_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmulh_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmulh_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmulh_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmulh_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmulh_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmulh_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmulh_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmulh_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmulh_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmulh_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmulh_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmulh_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmulh_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m8(op1, op2, vl);
+}
+
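+// The vmulhu tests below are the unsigned counterpart: vmulhu keeps the upper
+// SEW bits of the 2*SEW-bit unsigned product. A hypothetical scalar reference
+// for SEW=8 (illustrative sketch only, assuming <stdint.h>):
+//   static inline uint8_t ref_mulhu_u8(uint8_t a, uint8_t b) {
+//     return (uint8_t)(((uint16_t)a * (uint16_t)b) >> 8); // high byte of product
+//   }
+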
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vx_i8mf8(vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vx_i8mf4(vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vx_i8mf2(vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vv_i8m1(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhsu_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vx_i8m1(vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vv_i8m2(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhsu_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vx_i8m2(vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vv_i8m4(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhsu_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vx_i8m4(vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vv_i8m8(vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhsu_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vx_i8m8(vint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhsu_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vx_i16mf4(vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vx_i16mf2(vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vv_i16m1(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhsu_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1(vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhsu_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2(vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhsu_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4(vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8(vint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhsu_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8(vint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2(vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhsu_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1(vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhsu_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2(vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhsu_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4(vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8(vint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhsu_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhsu_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhsu_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhsu_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhsu_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmul_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmul_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmul_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmul_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmul_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmul_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmul_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmul_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmul_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmul_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmul_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmul_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmul_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmul_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmul_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmul_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmul_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmul_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmul_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmul_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmul_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmul_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmul_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmul_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmul_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmul_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmul_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vmulh_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vmulh_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vmulh_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vmulh_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vmulh_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vmulh_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vmulh_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vmulh_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vmulh_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vmulh_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vmulh_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vmulh_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vmulh_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vmulh_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vmulh_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vmulh_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vmulh_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vmulh_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulh.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vmulh_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulh.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vmulh_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulh.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vmulh_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulh_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
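+// The tests below cover the masked vmulhsu intrinsics (signed-by-unsigned
+// high-half multiply): op1 and the result are signed, op2 is unsigned.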
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vmulhsu_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vmulhsu_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vmulhsu_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vmulhsu_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
+ return vmulhsu_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vmulhsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vmulhsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vmulhsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vmulhsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vmulhsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) {
+ return vmulhsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vmulhsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vmulhsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vmulhsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vmulhsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vmulhsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
+ return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmulhsu_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
+ return vmulhsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c
new file mode 100644
index 000000000000..5f06b751718b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxnor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
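+// vmxnor.mm produces, for each of the first vl mask bits, the XNOR of the
+// source bits: ~(op1 ^ op2). Per-bit reference (illustrative only):
+//   bool ref = (a == b);
+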
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxnor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmxnor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmxnor_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxnor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmxnor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmxnor_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxnor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmxnor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmxnor_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxnor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmxnor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmxnor_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxnor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmxnor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmxnor_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxnor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmxnor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmxnor_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxnor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxnor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmxnor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmxnor_mm_b64(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c
new file mode 100644
index 000000000000..9c843877a83f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmxor.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
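+// vmxor.mm produces, for each of the first vl mask bits, the XOR of the
+// source bits: op1 ^ op2. Per-bit reference (illustrative only):
+//   bool ref = (a != b);
+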
+// CHECK-RV32-LABEL: @test_vmxor_mm_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmxor.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
+//
+vbool1_t test_vmxor_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmxor_mm_b1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmxor.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmxor_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmxor_mm_b2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmxor.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmxor_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmxor_mm_b4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmxor.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmxor_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmxor_mm_b8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmxor.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmxor_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmxor_mm_b16(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmxor.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmxor_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmxor_mm_b32(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vmxor_mm_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vmxor_mm_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmxor_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmxor_mm_b64(op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
new file mode 100644
index 000000000000..5c3c0cbb0c81
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
@@ -0,0 +1,1793 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
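+// vnclip (signed) and vnclipu (unsigned) narrow a 2*SEW source element to
+// SEW bits: the element is right-shifted by the low log2(2*SEW) bits of the
+// shift operand, rounded per the vxrm CSR, then saturated to the destination
+// range (setting vxsat on overflow). A rough scalar sketch of the signed
+// i16 -> i8 case, ignoring rounding (illustrative only):
+//   int16_t t = (int16_t)(w >> (shift & 15));
+//   int8_t ref = t > INT8_MAX ? INT8_MAX : (t < INT8_MIN ? INT8_MIN : t);
+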
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnclip_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnclip_wv_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnclip_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnclip_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnclip_wv_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnclip_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnclip_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnclip_wv_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnclip_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnclip_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnclip_wv_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnclip_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnclip_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnclip_wv_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnclip_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnclip_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnclip_wv_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnclip_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnclip_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vnclip_wv_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnclip_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnclip_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vnclip_wv_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnclip_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnclip_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnclip_wv_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnclip_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnclip_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnclip_wv_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnclip_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnclip_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnclip_wv_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnclip_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnclip_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vnclip_wv_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnclip_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnclip_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnclip_wv_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnclip_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnclip_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnclip_wv_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnclip_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnclip_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnclip_wv_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnclip_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i32m4(op1, shift, vl);
+}
+
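+// vnclipu is the unsigned counterpart of vnclip: both perform a narrowing
+// fixed-point right shift from SEW*2 down to SEW, but vnclipu saturates the
+// result to the unsigned range of the narrower type rather than the signed
+// one, so its operands and results use the vuint*/u* types below.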
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnclipu_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnclipu_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnclipu_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnclipu_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnclipu_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnclipu_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnclipu_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnclipu_wv_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnclipu_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnclipu_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnclipu_wv_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnclipu_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnclipu_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnclipu_wv_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnclipu_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnclipu_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnclipu_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnclipu_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnclipu_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnclipu_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnclipu_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnclipu_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnclipu_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnclipu_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnclipu_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnclipu_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnclipu_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnclipu_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift,
+ size_t vl) {
+ return vnclipu_wv_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnclipu_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnclipu_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnclipu_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u32m4(op1, shift, vl);
+}
+
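+// The _m variants below add a mask and a maskedoff operand: elements whose
+// mask bit is set receive the clipped result, while inactive elements are
+// taken from maskedoff, as reflected by the extra <vscale x N x i1> and
+// merge operands in the llvm.riscv.vnclip(u).mask intrinsic calls.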
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint16mf4_t op1, vuint8mf8_t shift,
+ size_t vl) {
+ return vnclip_wv_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint16mf4_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint16mf2_t op1, vuint8mf4_t shift,
+ size_t vl) {
+ return vnclip_wv_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint16mf2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint16m1_t op1, vuint8mf2_t shift,
+ size_t vl) {
+ return vnclip_wv_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint16m1_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnclip_wv_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint16m2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnclip_wv_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint16m4_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnclip_wv_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint16m8_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint32mf2_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vnclip_wv_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint32mf2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint32m1_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vnclip_wv_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint32m1_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint32m2_t op1, vuint16m1_t shift,
+ size_t vl) {
+ return vnclip_wv_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint32m2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint32m4_t op1, vuint16m2_t shift,
+ size_t vl) {
+ return vnclip_wv_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint32m4_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint32m8_t op1, vuint16m4_t shift,
+ size_t vl) {
+ return vnclip_wv_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint32m8_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint64m1_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vnclip_wv_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint64m1_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint64m2_t op1, vuint32m1_t shift,
+ size_t vl) {
+ return vnclip_wv_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint64m2_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint64m4_t op1, vuint32m2_t shift,
+ size_t vl) {
+ return vnclip_wv_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint64m4_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint64m8_t op1, vuint32m4_t shift,
+ size_t vl) {
+ return vnclip_wv_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclip_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint64m8_t op1, size_t shift, size_t vl) {
+ return vnclip_wx_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint16mf4_t op1, vuint8mf8_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint16mf2_t op1, vuint8mf4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint16m1_t op1, vuint8mf2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint16m1_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint16m2_t op1, vuint8m1_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint16m2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint16m4_t op1, vuint8m2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint16m4_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint16m8_t op1, vuint8m4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint16m8_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint32mf2_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint32mf2_t op1, size_t shift,
+ size_t vl) {
+ return vnclipu_wx_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint32m1_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint32m1_t op1, size_t shift,
+ size_t vl) {
+ return vnclipu_wx_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint32m2_t op1, vuint16m1_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint32m2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint32m4_t op1, vuint16m2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint32m4_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint32m8_t op1, vuint16m4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint32m8_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint64m1_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint64m1_t op1, size_t shift,
+ size_t vl) {
+ return vnclipu_wx_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint64m2_t op1, vuint32m1_t shift,
+ size_t vl) {
+ return vnclipu_wv_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint64m2_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint64m4_t op1, vuint32m2_t shift,
+ size_t vl) {
+ return vnclipu_wv_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint64m4_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint64m8_t op1, vuint32m4_t shift,
+ size_t vl) {
+ return vnclipu_wv_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnclipu_wx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint64m8_t op1, size_t shift, size_t vl) {
+ return vnclipu_wx_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
new file mode 100644
index 000000000000..f3f2ee6a9905
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
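+// The first two RUN lines above check the generated LLVM IR on riscv32 and
+// riscv64 (after mem2reg, so arguments appear directly as SSA values); the
+// third compiles all the way to assembly and, together with the ASM-NOT line
+// above, verifies that codegen succeeds without emitting any warning.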
+#include <riscv_vector.h>
+
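+// Semantics note (not part of the autogenerated checks): vnmsac is the
+// integer multiply-sub accumulate, overwriting the accumulator operand:
+//   acc[i] = -(op1[i] * op2[i]) + acc[i]
+// The _vv forms take op1 as a vector; the _vx forms splat a scalar op1.
+// A rough per-element model for the i8 case (a sketch only -- nmsac8 is a
+// hypothetical helper; the instruction itself operates on all vl elements
+// of a scalable vector, with modular wraparound arithmetic):
+//   int8_t nmsac8(int8_t acc, int8_t op1, int8_t op2) {
+//     return (int8_t)(acc - op1 * op2); // wraps modulo 2^8
+//   }
+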
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac_vv_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac_vx_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac_vv_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac_vx_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac_vv_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac_vx_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac_vv_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac_vx_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac_vv_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac_vx_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac_vv_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac_vx_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac_vv_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac_vx_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac_vx_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac_vv_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac_vx_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac_vv_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac_vx_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac_vv_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac_vx_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac_vv_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac_vx_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac_vv_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac_vx_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac_vv_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac_vx_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac_vv_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac_vx_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vx_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac_vv_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsac_vx_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac_vv_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsac_vx_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac_vv_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsac_vx_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac_vv_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsac_vx_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac_vv_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsac_vx_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac_vv_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsac_vx_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac_vv_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsac_vx_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsac_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsac_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsac_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsac_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsac_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsac_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
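+// The mask type tracks the SEW/LMUL ratio: vboolN_t pairs with element types
+// where N = SEW/LMUL, e.g. i32mf2 gives 32/(1/2) = 64 and thus vbool64_t,
+// matching the <vscale x 1 x i1> mask operand in the IR below.
+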
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsac_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsac_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsac_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsac_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsac_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsac_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsac_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsac_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsac_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
+
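+// The unsigned tests below exercise the same @llvm.riscv.vnmsac.mask.*
+// intrinsics as the signed tests above: two's-complement multiply-accumulate
+// is sign-agnostic, so only the C-level types and mangled names differ.
+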
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac_vv_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsac_vx_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac_vv_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsac_vx_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac_vv_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsac_vx_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac_vv_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsac_vx_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac_vv_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsac_vx_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac_vv_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsac_vx_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac_vv_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsac_vx_u8m8_m(mask, acc, op1, op2, vl);
+}
+
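+// A rough scalar model of the masked vx form (a sketch in comments only;
+// compiled code here would perturb the autogenerated CHECK lines):
+//   for (size_t i = 0; i < vl; ++i)
+//     res[i] = mask[i] ? (uint16_t)(acc[i] - op1 * op2[i]) : acc[i];
+// The acc operand is passed first to the .mask intrinsic and, per the usual
+// vd-undisturbed convention for masked multiply-add, masked-off lanes are
+// expected to keep its value.
+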
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsac_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsac_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsac_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsac_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsac_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsac_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsac_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsac_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsac_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsac_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsac_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsac_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsac_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsac_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsac_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
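+
+// Editorial sketch (not part of the autogenerated file): the _m variants
+// above apply the operation only where the mask bit is set; inactive
+// elements keep the value already in acc (mask-undisturbed). A minimal
+// scalar model of the masked vnmsac_vx form, using a hypothetical helper
+// name for illustration only:
+//
+//   static inline void scalar_vnmsac_vx_u32_m(const _Bool *mask,
+//                                             uint32_t *acc, uint32_t op1,
+//                                             const uint32_t *op2,
+//                                             size_t vl) {
+//     for (size_t i = 0; i < vl; ++i)
+//       if (mask[i])
+//         acc[i] = -(op1 * op2[i]) + acc[i]; // -(op1*op2)+acc, wraps mod 2^32
+//   }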
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
new file mode 100644
index 000000000000..4cc83f48c1a1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
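+
+// Editorial sketch (not part of the autogenerated file): vnmsub overwrites
+// the multiplicand operand, vd[i] = -(vd[i] * op1) + op2[i], whereas vnmsac
+// (tested in the previous file) overwrites the addend,
+// vd[i] = -(op1 * op2[i]) + vd[i]; op1 is a scalar for the _vx forms and a
+// per-element vector for the _vv forms. A minimal scalar model of the
+// unmasked vnmsub_vx form, using a hypothetical helper name for
+// illustration only:
+//
+//   static inline void scalar_vnmsub_vx_u32(uint32_t *acc, uint32_t op1,
+//                                           const uint32_t *op2, size_t vl) {
+//     for (size_t i = 0; i < vl; ++i)
+//       acc[i] = -(acc[i] * op1) + op2[i]; // wraps mod 2^32, like the insn
+//   }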
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vx_i8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub_vv_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub_vx_i8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub_vv_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub_vx_i8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub_vv_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub_vx_i8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub_vv_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub_vx_i8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub_vv_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub_vx_i8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub_vv_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub_vx_i8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub_vx_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub_vx_u8mf8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub_vv_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub_vx_u8mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub_vv_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub_vx_u8mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub_vv_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub_vx_u8m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub_vv_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub_vx_u8m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub_vv_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub_vx_u8m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub_vv_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub_vx_u8m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub_vx_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
+ return vnmsub_vx_i8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub_vv_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
+ return vnmsub_vx_i8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub_vv_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
+ return vnmsub_vx_i8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub_vv_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
+ return vnmsub_vx_i8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub_vv_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
+ return vnmsub_vx_i8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub_vv_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
+ return vnmsub_vx_i8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub_vv_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
+ return vnmsub_vx_i8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vnmsub_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
+ return vnmsub_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
+ return vnmsub_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
+ return vnmsub_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
+ return vnmsub_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
+ return vnmsub_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
+ return vnmsub_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
+ return vnmsub_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
+ return vnmsub_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
+ return vnmsub_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
+ return vnmsub_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
+ return vnmsub_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
+ return vnmsub_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
+ return vnmsub_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
+ return vnmsub_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
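+
+// A brief, hand-written note (not autogenerated): the remaining vnmsub tests
+// repeat the masked vv/vx forms for the unsigned element types (u8 through
+// u64). They lower to the same @llvm.riscv.vnmsub.mask intrinsics as the
+// signed cases above, since vnmsub draws no signed/unsigned distinction.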
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub_vv_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vnmsub_vx_u8mf8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub_vv_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vnmsub_vx_u8mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub_vv_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vnmsub_vx_u8mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub_vv_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
+ return vnmsub_vx_u8m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub_vv_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
+ return vnmsub_vx_u8m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub_vv_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
+ return vnmsub_vx_u8m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub_vv_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
+ return vnmsub_vx_u8m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
+ return vnmsub_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vnmsub_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vnmsub_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vnmsub_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vnmsub_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
+ return vnmsub_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vnmsub_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vnmsub_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vnmsub_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vnmsub_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
+ return vnmsub_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
+ return vnmsub_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
+ return vnmsub_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
+ return vnmsub_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnmsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
+ return vnmsub_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
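+
+// A brief, hand-written summary (not autogenerated): the masked vnmsub
+// intrinsics exercised in this file compute acc[i] = -(acc[i] * op1[i]) +
+// op2[i] for each active element (mask[i] set); elements where the mask is
+// clear keep their value from acc (mask-undisturbed).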
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
new file mode 100644
index 000000000000..5783d5ef363c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c
@@ -0,0 +1,848 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
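+
+// A brief, hand-written note on the checks below (not autogenerated): vnsra
+// is the narrowing arithmetic right shift. Each wv form shifts a 2*SEW-wide
+// source (e.g. vint16mf4_t) element-wise by a SEW-wide unsigned shift vector
+// and truncates the result to SEW bits (e.g. vint8mf8_t). The wx forms take
+// a scalar size_t shift amount, which lowers to an XLEN-sized integer (i32
+// on riscv32, i64 on riscv64), as the CHECK lines reflect.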
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnsra_wv_i8mf8(vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnsra_wv_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnsra_wx_i8mf8(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnsra_wv_i8mf4(vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnsra_wv_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnsra_wx_i8mf4(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnsra_wv_i8mf2(vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnsra_wv_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnsra_wx_i8mf2(vint16m1_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnsra_wv_i8m1(vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnsra_wv_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnsra_wx_i8m1(vint16m2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnsra_wv_i8m2(vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnsra_wv_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnsra_wx_i8m2(vint16m4_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnsra_wv_i8m4(vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnsra_wv_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnsra_wx_i8m4(vint16m8_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnsra_wv_i16mf4(vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
+ return vnsra_wv_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnsra_wx_i16mf4(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnsra_wv_i16mf2(vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
+ return vnsra_wv_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnsra_wx_i16mf2(vint32m1_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnsra_wv_i16m1(vint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnsra_wv_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnsra_wx_i16m1(vint32m2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnsra_wv_i16m2(vint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnsra_wv_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnsra_wx_i16m2(vint32m4_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnsra_wv_i16m4(vint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnsra_wv_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnsra_wx_i16m4(vint32m8_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnsra_wv_i32mf2(vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
+ return vnsra_wv_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnsra_wx_i32mf2(vint64m1_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnsra_wv_i32m1(vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnsra_wv_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnsra_wx_i32m1(vint64m2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnsra_wv_i32m2(vint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnsra_wv_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnsra_wx_i32m2(vint64m4_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnsra_wv_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnsra_wv_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnsra_wv_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnsra_wv_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnsra_wv_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnsra_wv_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnsra_wv_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
+ return vnsra_wv_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
+ return vnsra_wv_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnsra_wv_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnsra_wv_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnsra_wv_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
+ return vnsra_wv_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnsra_wv_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnsra_wv_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnsra_wv_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsra_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vnsra_wx_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
new file mode 100644
index 000000000000..38e4f109546a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c
@@ -0,0 +1,848 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnsrl_wv_u8mf8(vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnsrl_wv_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnsrl_wx_u8mf8(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnsrl_wv_u8mf4(vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnsrl_wv_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnsrl_wx_u8mf4(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnsrl_wv_u8mf2(vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnsrl_wv_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnsrl_wx_u8mf2(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnsrl_wv_u8m1(vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnsrl_wv_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnsrl_wx_u8m1(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnsrl_wv_u8m2(vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnsrl_wv_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnsrl_wx_u8m2(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnsrl_wv_u8m4(vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnsrl_wv_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnsrl_wx_u8m4(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnsrl_wv_u16mf4(vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
+ return vnsrl_wv_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnsrl_wx_u16mf4(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnsrl_wv_u16mf2(vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
+ return vnsrl_wv_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnsrl_wx_u16mf2(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnsrl_wv_u16m1(vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnsrl_wv_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnsrl_wx_u16m1(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnsrl_wv_u16m2(vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnsrl_wv_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnsrl_wx_u16m2(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnsrl_wv_u16m4(vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnsrl_wv_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnsrl_wx_u16m4(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnsrl_wv_u32mf2(vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
+ return vnsrl_wv_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnsrl_wx_u32mf2(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnsrl_wv_u32m1(vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnsrl_wv_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnsrl_wx_u32m1(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnsrl_wv_u32m2(vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnsrl_wv_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnsrl_wx_u32m2(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnsrl_wv_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
+ return vnsrl_wv_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
+ return vnsrl_wv_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
+ return vnsrl_wv_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
+ return vnsrl_wv_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
+ return vnsrl_wv_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
+ return vnsrl_wv_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
+ return vnsrl_wv_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
+ return vnsrl_wv_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
+ return vnsrl_wv_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
+ return vnsrl_wv_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
+ return vnsrl_wv_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
+ return vnsrl_wv_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
+ return vnsrl_wv_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
+ return vnsrl_wv_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
+ return vnsrl_wv_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vnsrl_wx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vnsrl_wx_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
new file mode 100644
index 000000000000..544b52467304
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vor_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vor_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vor_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vor_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vor_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vor_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vor_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vor_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vor_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vor_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vor_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vor_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vor_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vor_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vor_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vor_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vor_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vor_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vor_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vor_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vor_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vor_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vor_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vor_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vor_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vor_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vor_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vor_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vor_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vor_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vor_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vor_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vor_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vor_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vor_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vor_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vor_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vor_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vor_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vor_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vor_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vor_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vor_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vor_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vor_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vor_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vor_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vor_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vor_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vor_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vor_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vor_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vor_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vor_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vor_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vor_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vor_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vor_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vor_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vor_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vor_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vor_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vor_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vor_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vor_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vor_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vor_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vor_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vor_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vor_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vor_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vor_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vor_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
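+
+// Note the operand order of the masked lowering seen in the checks above:
+// maskedoff is the first IR operand, the mask is placed immediately before
+// vl, and the vl operand is i32 on RV32 and i64 on RV64.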
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c
new file mode 100644
index 000000000000..a94aa34d1f9f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vpopc.c
@@ -0,0 +1,204 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
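+
+// vpopc.m counts the set bits in a mask register among the first vl
+// elements, returning an XLEN-wide scalar (hence i32 on RV32, i64 on RV64).
+// The masked (_m) variants count only bits where the control mask is set.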
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
+ return vpopc_m_b1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
+ return vpopc_m_b2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
+ return vpopc_m_b4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
+ return vpopc_m_b8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
+ return vpopc_m_b16(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
+ return vpopc_m_b32(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
+ return vpopc_m_b64(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv64i1.i32(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vpopc_m_b1_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv32i1.i32(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vpopc_m_b2_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv16i1.i32(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vpopc_m_b4_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv8i1.i32(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vpopc_m_b8_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b16_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv4i1.i32(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vpopc_m_b16_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b32_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv2i1.i32(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vpopc_m_b32_m(mask, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vpopc_m_b64_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vpopc.mask.nxv1i1.i32(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vpopc_m_b64_m(mask, op1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
new file mode 100644
index 000000000000..268f2e4cd9f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
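+
+// vredand.vs performs a bitwise-AND reduction: element 0 of the result is
+// the AND of element 0 of `scalar` with all `vector` elements up to vl.
+// The source LMUL may differ from the LMUL-1 result type, which is why each
+// source LMUL (mf8 through m8) gets its own intrinsic below.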
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredand_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredand_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredand_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredand_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredand_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredand_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredand_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredand_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredand_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
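The `_m` variants being added here thread a `vboolN_t` mask as the first argument, where N is the SEW/LMUL ratio of the `vector` operand (e.g. `vbool4_t` for e8/m2, as in the tests that follow), and masked-off elements simply drop out of the reduction while `scalar[0]` still participates. As a rough usage sketch under the same pre-v1.0 intrinsic naming these tests exercise (the helper `and_reduce_nonzero` is hypothetical and `n > 0` is assumed; not part of the patch):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

uint8_t and_reduce_nonzero(const uint8_t *p, size_t n) {
  // Seed element 0 with the AND identity; only element 0 of the m1
  // accumulator is ever read back.
  vuint8m1_t acc = vmv_v_x_u8m1(UINT8_MAX, vsetvl_e8m1(1));
  for (size_t i = 0, vl; i < n; i += vl) {
    vl = vsetvl_e8m2(n - i);
    vuint8m2_t v = vle8_v_u8m2(p + i, vl);
    vbool4_t m = vmsne_vx_u8m2_b4(v, 0, vl); // keep only nonzero lanes
    // acc[0] &= AND of the active (unmasked) lanes of v
    acc = vredand_vs_u8m2_u8m1_m(m, acc, v, acc, vl);
  }
  return vmv_x_s_u8m1_u8(acc);
}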
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredand_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredand_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredand_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredand_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}
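Taken together, the unmasked `vredand_vs_<src>_<dst>` forms above all share the shape `(dst, vector, scalar, vl)`: element 0 of the m1 result receives `scalar[0]` AND-reduced with `vector[0..vl-1]`, and `dst` supplies the tail. A strip-mined usage sketch under the same pre-v1.0 naming (the helper `and_reduce` and the `n > 0` assumption are illustrative, not part of the patch):

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

uint8_t and_reduce(const uint8_t *p, size_t n) {
  size_t vl = vsetvl_e8m1(n);
  // Seed with all-ones, the identity element for AND.
  vuint8m1_t acc = vmv_v_x_u8m1(UINT8_MAX, vl);
  for (size_t i = 0; i < n; i += vl) {
    vl = vsetvl_e8m1(n - i);
    vuint8m1_t v = vle8_v_u8m1(p + i, vl);
    // acc[0] = acc[0] & AND(v[0..vl-1]); acc doubles as dst and scalar.
    acc = vredand_vs_u8m1_u8m1(acc, v, acc, vl);
  }
  return vmv_x_s_u8m1_u8(acc);
}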
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
new file mode 100644
index 000000000000..490121430daa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
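The RUN lines mirror the other reduction tests: IR checks for rv32 and rv64, plus an assembly pass that only asserts no warnings are emitted. The tests below then sweep `vredmax_vs_<srcLMUL>_<m1>` across every signed element width and LMUL. As a usage sketch under the same naming (the helper `max_reduce` is hypothetical and `n > 0` is assumed), a signed max-reduction seeds the accumulator with the identity `INT32_MIN`:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

int32_t max_reduce(const int32_t *p, size_t n) {
  size_t vl = vsetvl_e32m1(n);
  // INT32_MIN is the identity for signed max.
  vint32m1_t acc = vmv_v_x_i32m1(INT32_MIN, vl);
  for (size_t i = 0; i < n; i += vl) {
    vl = vsetvl_e32m1(n - i);
    vint32m1_t v = vle32_v_i32m1(p + i, vl);
    // acc[0] = max(acc[0], max(v[0..vl-1]))
    acc = vredmax_vs_i32m1_i32m1(acc, v, acc, vl);
  }
  return vmv_x_s_i32m1_i32(acc);
}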
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmax_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmax_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmax_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmax_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmax_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmax_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmax_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
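+// The vredmaxu tests below mirror the signed vredmax tests above for the
+// unsigned element types: same (dst, vector, scalar, vl) operand order,
+// lowering to the @llvm.riscv.vredmaxu.* intrinsics instead. As with vredmax,
+// the dst and scalar operands always use the LMUL=1 type of the element
+// width, regardless of the LMUL of the vector operand being reduced.
+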
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
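+// The _m variants below exercise the masked forms, which lower to the
+// corresponding @llvm.riscv.vredmax.mask.* / @llvm.riscv.vredmaxu.mask.*
+// intrinsics with the mask passed after the scalar operand and before vl.
+// The vbool<N>_t mask type is paired with the vector operand so that
+// N = SEW/LMUL: a vint8mf8_t vector (SEW=8, LMUL=1/8) takes vbool64_t,
+// while a vint8m8_t vector takes vbool1_t.
+//
+// A minimal usage sketch (illustrative only; the helper name is
+// hypothetical, and per the V spec only active lanes contribute):
+//
+//   // Max of the lanes of `vec` selected by `mask`, seeded with `scalar`.
+//   vint8m1_t max_of_active(vbool64_t mask, vint8m1_t dst, vint8mf8_t vec,
+//                           vint8m1_t scalar, size_t vl) {
+//     return vredmax_vs_i8mf8_i8m1_m(mask, dst, vec, scalar, vl);
+//   }
+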
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmax_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredmaxu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredmaxu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
new file mode 100644
index 000000000000..e9e8cac3a5a4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredmin_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredmin_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredmin_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmin_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmin_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmin_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredmin_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredminu_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredmin_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredminu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredminu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredminu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredminu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
new file mode 100644
index 000000000000..c933e4a0f73a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredor_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredor_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredor_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredor_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredor_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredor_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredor_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
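+// The unsigned tests below mirror the signed ones above: vredor is a plain
+// bitwise OR reduction, so both sign variants lower to the same
+// llvm.riscv.vredor.* intrinsics and only the C-level types differ.
+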
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredor_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredor_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredor_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredor_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredor_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
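+
+// A minimal usage sketch (not one of the checked tests; assuming the
+// v0.10-era non-overloaded intrinsic spellings this file already uses, plus
+// vsetvl_e32m8 / vle32_v_u32m8 / vmv_v_x_u32m1 / vmv_x_s_u32m1_u32):
+//
+//   uint32_t or_reduce_u32(const uint32_t *buf, size_t n) {
+//     size_t vl;
+//     // Zero the m1 accumulator; only element 0 is read back at the end.
+//     vuint32m1_t acc = vmv_v_x_u32m1(0, vsetvlmax_e32m1());
+//     for (size_t i = 0; i < n; i += vl) {
+//       vl = vsetvl_e32m8(n - i);
+//       vuint32m8_t v = vle32_v_u32m8(buf + i, vl);
+//       // acc[0] = acc[0] | OR(v[0..vl-1]); dst and scalar both pass acc.
+//       acc = vredor_vs_u32m8_u32m1(acc, v, acc, vl);
+//     }
+//     return vmv_x_s_u32m1_u32(acc);
+//   }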
+
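+// Masked variants follow. Each _m test takes a vboolN_t mask whose ratio N
+// equals SEW/LMUL of the source operand (vint8mf8_t pairs with vbool64_t,
+// vint8m8_t with vbool1_t, and so on), and codegen switches to the
+// llvm.riscv.vredor.mask.* intrinsics, which carry an extra
+// <vscale x K x i1> operand. Per the RVV spec, masked-off elements of
+// `vector` do not participate in the reduction.
+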
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
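+// The masked unsigned tests below are again type-renamed copies of the
+// masked signed ones; the vboolN_t/LMUL pairing is unchanged.
+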
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector, vuint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector, vuint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector, vuint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector, vuint16m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector, vuint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector, vuint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector, vuint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector, vuint32m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector, vuint64m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector, vuint64m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector, vuint64m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector, vuint64m1_t scalar,
+ size_t vl) {
+ return vredor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
new file mode 100644
index 000000000000..e574243d0f2d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
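+// Each test below checks that a vredsum_vs_* C intrinsic lowers to the
+// corresponding @llvm.riscv.vredsum.<result>.<vector>.<xlen> call: the dst
+// and scalar operands are LMUL=1 vectors of the element type, while the
+// vector operand ranges over the legal LMULs for each element width. Only
+// element 0 of the result is produced by the reduction,
+// result[0] = scalar[0] + sum(vector[0..vl-1]); in this generation of the
+// intrinsics, dst supplies the remaining (tail) elements.
+//
+// A minimal usage sketch (illustrative only, with no CHECK lines attached;
+// it merely re-wraps an intrinsic already exercised below):
+static inline vint32m1_t sum_sketch(vint32m1_t dst, vint32m1_t vector,
+                                    vint32m1_t scalar, size_t vl) {
+  // scalar[0] seeds the accumulation; dst is carried through for the
+  // elements past the single-element result.
+  return vredsum_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+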
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredsum_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredsum_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredsum_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredsum_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredsum_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredsum_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredsum_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
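+// The unsigned variants below lower to the same signless LLVM intrinsics as
+// their signed counterparts; e.g. both i8mf8 and u8mf8 select
+// @llvm.riscv.vredsum.nxv8i8.nxv1i8, as the CHECK lines show.
+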
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredsum_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
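+// The _m variants below append a vboolN_t mask (N = SEW/LMUL of the vector
+// operand) and select the @llvm.riscv.vredsum.mask.* intrinsics, which take
+// the mask as an extra <vscale x N x i1> operand ahead of vl.
+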
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredsum_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredsum_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredsum_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredsum_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}
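
[Editorial note, not part of the patch: the masked vredsum forms above take (mask, dst, vector, scalar, vl); element 0 of the result is scalar[0] plus the sum of the active elements of vector, and dst supplies the inactive/tail elements. A minimal usage sketch follows, assuming the v0.x-era intrinsic spellings vsetvlmax_e32m1, vsetvl_e32m1, vle32_v_i32m1, vmsgt_vx_i32m1_b32, vmv_v_x_i32m1, and vmv_x_s_i32m1_i32 in addition to the vredsum_vs_i32m1_i32m1_m form exercised in these tests.]

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Sum only the strictly positive elements of buf[0..n). The running total
   lives in element 0 of acc; passing acc as both dst and scalar keeps the
   inactive and tail elements stable across iterations. */
static int32_t sum_positive(const int32_t *buf, size_t n) {
  vint32m1_t acc = vmv_v_x_i32m1(0, vsetvlmax_e32m1()); /* acc[0] = 0 */
  for (size_t i = 0; i < n; /* advanced by vl below */) {
    size_t vl = vsetvl_e32m1(n - i);
    vint32m1_t v = vle32_v_i32m1(buf + i, vl);
    vbool32_t m = vmsgt_vx_i32m1_b32(v, 0, vl);         /* active = v > 0 */
    /* acc[0] += sum of v's active elements; inactive lanes come from dst. */
    acc = vredsum_vs_i32m1_i32m1_m(m, acc, v, acc, vl);
    i += vl;
  }
  return vmv_x_s_i32m1_i32(acc);                        /* extract acc[0] */
}

[Passing the accumulator as both dst and scalar is the usual strip-mining idiom for these reductions: the per-iteration result lands back in element 0, so no cross-iteration merge is needed.]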
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
new file mode 100644
index 000000000000..04de6d0e1e6b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+ vint8m1_t scalar, size_t vl) {
+ return vredxor_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vredxor_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vredxor_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredxor_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredxor_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredxor_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vredxor_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+ vuint8m1_t scalar, size_t vl) {
+ return vredxor_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+ vint8mf8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+ vint8mf4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+ vint8mf2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+ vint8m1_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+ vint8m2_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+ vint8m4_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+ vint8m8_t vector, vint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint16mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint16mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint16m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint16m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint16m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint32mf2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint32m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint32m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint32m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint64m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint64m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint64m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+ vuint8mf8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+ vuint8mf4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+ vuint8mf2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+ vuint8m2_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+ vuint8m4_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+ vuint8m8_t vector, vuint8m1_t scalar,
+ size_t vl) {
+ return vredxor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint16mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint16mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint16m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint16m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint16m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vredxor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint32mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint32m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint32m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint32m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vredxor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint64m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint64m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint64m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vredxor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
new file mode 100644
index 000000000000..06e0e8a2749f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vrem_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vrem_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vrem_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vrem_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vrem_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vrem_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vrem_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vrem_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vrem_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vrem_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vrem_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vrem_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vrem_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vrem_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vrem_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vrem_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vrem_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vrem_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vrem_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vremu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vremu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vremu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vremu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vremu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vremu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vremu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vremu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vremu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vremu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vremu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vremu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vremu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vremu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vremu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vremu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vremu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vremu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vremu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vrem_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrem.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vrem_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrem.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vrem_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrem.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vrem_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrem.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vrem_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrem.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vrem_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrem.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vrem_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrem.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrem_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vrem_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrem.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vrem_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrem.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vrem_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrem.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vrem_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrem.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vrem_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrem.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vrem_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrem.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrem_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vrem_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrem.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vrem_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrem.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vrem_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrem.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vrem_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrem.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vrem_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrem.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrem_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vrem_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrem.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vrem_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrem.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vrem_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrem.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vrem_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrem_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrem.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrem_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vremu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vremu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vremu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vremu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vremu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vremu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vremu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vremu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vremu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vremu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vremu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vremu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vremu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vremu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vremu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vremu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vremu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vremu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vremu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vremu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vremu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vremu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vremu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vremu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vremu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vremu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vremu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vremu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vremu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vremu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vremu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vremu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vremu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vremu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vremu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vremu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vremu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vremu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vremu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vremu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vremu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vremu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vremu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vremu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vremu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vremu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vremu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vremu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
new file mode 100644
index 000000000000..0268c4baab9e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
@@ -0,0 +1,4785 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgather_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t index,
+ size_t vl) {
+ return vrgather_vv_i8mf8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgather_vx_i8mf8(vint8mf8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8mf8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgather_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t index,
+ size_t vl) {
+ return vrgather_vv_i8mf4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgather_vx_i8mf4(vint8mf4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8mf4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgather_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t index,
+ size_t vl) {
+ return vrgather_vv_i8mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgather_vx_i8mf2(vint8mf2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgather_vv_i8m1(vint8m1_t op1, vuint8m1_t index, size_t vl) {
+ return vrgather_vv_i8m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgather_vx_i8m1(vint8m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgather_vv_i8m2(vint8m2_t op1, vuint8m2_t index, size_t vl) {
+ return vrgather_vv_i8m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgather_vx_i8m2(vint8m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgather_vv_i8m4(vint8m4_t op1, vuint8m4_t index, size_t vl) {
+ return vrgather_vv_i8m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgather_vx_i8m4(vint8m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrgather_vv_i8m8(vint8m8_t op1, vuint8m8_t index, size_t vl) {
+ return vrgather_vv_i8m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrgather_vx_i8m8(vint8m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgather_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t index,
+ size_t vl) {
+ return vrgather_vv_i16mf4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgather_vx_i16mf4(vint16mf4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16mf4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgather_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t index,
+ size_t vl) {
+ return vrgather_vv_i16mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgather_vx_i16mf2(vint16mf2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgather_vv_i16m1(vint16m1_t op1, vuint16m1_t index,
+ size_t vl) {
+ return vrgather_vv_i16m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgather_vx_i16m1(vint16m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgather_vv_i16m2(vint16m2_t op1, vuint16m2_t index,
+ size_t vl) {
+ return vrgather_vv_i16m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgather_vx_i16m2(vint16m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgather_vv_i16m4(vint16m4_t op1, vuint16m4_t index,
+ size_t vl) {
+ return vrgather_vv_i16m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgather_vx_i16m4(vint16m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgather_vv_i16m8(vint16m8_t op1, vuint16m8_t index,
+ size_t vl) {
+ return vrgather_vv_i16m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgather_vx_i16m8(vint16m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgather_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather_vv_i32mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgather_vx_i32mf2(vint32mf2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgather_vv_i32m1(vint32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather_vv_i32m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgather_vx_i32m1(vint32m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgather_vv_i32m2(vint32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather_vv_i32m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgather_vx_i32m2(vint32m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgather_vv_i32m4(vint32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather_vv_i32m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgather_vx_i32m4(vint32m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgather_vv_i32m8(vint32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather_vv_i32m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgather_vx_i32m8(vint32m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgather_vv_i64m1(vint64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather_vv_i64m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgather_vx_i64m1(vint64m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i64m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgather_vv_i64m2(vint64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather_vv_i64m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgather_vx_i64m2(vint64m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i64m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgather_vv_i64m4(vint64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather_vv_i64m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgather_vx_i64m4(vint64m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i64m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgather_vv_i64m8(vint64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather_vv_i64m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgather_vx_i64m8(vint64m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i64m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgather_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t index,
+ size_t vl) {
+ return vrgather_vv_u8mf8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgather_vx_u8mf8(vuint8mf8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8mf8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgather_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t index,
+ size_t vl) {
+ return vrgather_vv_u8mf4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgather_vx_u8mf4(vuint8mf4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8mf4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgather_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t index,
+ size_t vl) {
+ return vrgather_vv_u8mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgather_vx_u8mf2(vuint8mf2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgather_vv_u8m1(vuint8m1_t op1, vuint8m1_t index, size_t vl) {
+ return vrgather_vv_u8m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgather_vx_u8m1(vuint8m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgather_vv_u8m2(vuint8m2_t op1, vuint8m2_t index, size_t vl) {
+ return vrgather_vv_u8m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgather_vx_u8m2(vuint8m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgather_vv_u8m4(vuint8m4_t op1, vuint8m4_t index, size_t vl) {
+ return vrgather_vv_u8m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgather_vx_u8m4(vuint8m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrgather_vv_u8m8(vuint8m8_t op1, vuint8m8_t index, size_t vl) {
+ return vrgather_vv_u8m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrgather_vx_u8m8(vuint8m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgather_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t index,
+ size_t vl) {
+ return vrgather_vv_u16mf4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgather_vx_u16mf4(vuint16mf4_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_u16mf4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgather_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t index,
+ size_t vl) {
+ return vrgather_vv_u16mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgather_vx_u16mf2(vuint16mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_u16mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgather_vv_u16m1(vuint16m1_t op1, vuint16m1_t index,
+ size_t vl) {
+ return vrgather_vv_u16m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgather_vx_u16m1(vuint16m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u16m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgather_vv_u16m2(vuint16m2_t op1, vuint16m2_t index,
+ size_t vl) {
+ return vrgather_vv_u16m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgather_vx_u16m2(vuint16m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u16m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgather_vv_u16m4(vuint16m4_t op1, vuint16m4_t index,
+ size_t vl) {
+ return vrgather_vv_u16m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgather_vx_u16m4(vuint16m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u16m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgather_vv_u16m8(vuint16m8_t op1, vuint16m8_t index,
+ size_t vl) {
+ return vrgather_vv_u16m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgather_vx_u16m8(vuint16m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u16m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgather_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather_vv_u32mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgather_vx_u32mf2(vuint32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_u32mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgather_vv_u32m1(vuint32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather_vv_u32m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgather_vx_u32m1(vuint32m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u32m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgather_vv_u32m2(vuint32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather_vv_u32m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgather_vx_u32m2(vuint32m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u32m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgather_vv_u32m4(vuint32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather_vv_u32m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgather_vx_u32m4(vuint32m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u32m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgather_vv_u32m8(vuint32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather_vv_u32m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgather_vx_u32m8(vuint32m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u32m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgather_vv_u64m1(vuint64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather_vv_u64m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgather_vx_u64m1(vuint64m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u64m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgather_vv_u64m2(vuint64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather_vv_u64m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgather_vx_u64m2(vuint64m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u64m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgather_vv_u64m4(vuint64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather_vv_u64m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgather_vx_u64m4(vuint64m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u64m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgather_vv_u64m8(vuint64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather_vv_u64m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u64m8(op1, index, vl);
+}
+
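+// The floating-point variants below gather float/double data, but the index
+// operand is still an unsigned integer vector of matching element count (or
+// a size_t for the vx form); only the data type in the llvm.riscv.vrgather
+// intrinsic name changes, e.g. nxv1f32 in place of nxv1i32.
+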
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgather_vv_f32mf2(vfloat32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather_vv_f32mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgather_vx_f32mf2(vfloat32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f32mf2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgather_vv_f32m1(vfloat32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather_vv_f32m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgather_vx_f32m1(vfloat32m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_f32m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgather_vv_f32m2(vfloat32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather_vv_f32m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgather_vx_f32m2(vfloat32m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_f32m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgather_vv_f32m4(vfloat32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather_vv_f32m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgather_vx_f32m4(vfloat32m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_f32m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgather_vv_f32m8(vfloat32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather_vv_f32m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgather_vx_f32m8(vfloat32m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_f32m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgather_vv_f64m1(vfloat64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather_vv_f64m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgather_vx_f64m1(vfloat64m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_f64m1(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgather_vv_f64m2(vfloat64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather_vv_f64m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgather_vx_f64m2(vfloat64m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_f64m2(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgather_vv_f64m4(vfloat64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather_vv_f64m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgather_vx_f64m4(vfloat64m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_f64m4(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgather_vv_f64m8(vfloat64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather_vv_f64m8(op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgather_vx_f64m8(vfloat64m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_f64m8(op1, index, vl);
+}
+
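+// The vrgatherei16 variants that follow always take a 16-bit unsigned index
+// vector with the same element count as the data vector, whatever the data
+// SEW. Per the RVV spec, this lets SEW=8 sources index beyond 255 and, at
+// large SEW, keeps the index operand in a smaller register group; hence
+// vuint16m2_t pairs with vint8m1_t below while the IR mangling still names
+// the data type (e.g. nxv8i8).
+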
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgatherei16_vv_i8mf8(vint8mf8_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgatherei16_vv_i8mf4(vint8mf4_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgatherei16_vv_i8mf2(vint8mf2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgatherei16_vv_i8m1(vint8m1_t op1, vuint16m2_t op2, size_t vl) {
+ return vrgatherei16_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgatherei16_vv_i8m2(vint8m2_t op1, vuint16m4_t op2, size_t vl) {
+ return vrgatherei16_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgatherei16_vv_i8m4(vint8m4_t op1, vuint16m8_t op2, size_t vl) {
+ return vrgatherei16_vv_i8m4(op1, op2, vl);
+}
+
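+// There is no i8m8 variant of vrgatherei16: with SEW=8 and LMUL=8 the 16-bit
+// index vector would need EMUL=16, which exceeds the maximum LMUL of 8.
+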
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgatherei16_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgatherei16_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgatherei16_vv_i16m1(vint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgatherei16_vv_i16m2(vint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgatherei16_vv_i16m4(vint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgatherei16_vv_i16m8(vint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgatherei16_vv_i32mf2(vint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgatherei16_vv_i32m1(vint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgatherei16_vv_i32m2(vint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgatherei16_vv_i32m4(vint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgatherei16_vv_i32m8(vint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgatherei16_vv_i64m1(vint64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgatherei16_vv_i64m2(vint64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgatherei16_vv_i64m4(vint64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgatherei16_vv_i64m8(vint64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i64m8(op1, op2, vl);
+}
+
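+// The unsigned variants lower to the same llvm.riscv.vrgatherei16 intrinsics
+// as the signed ones: gathering only moves bit patterns, so the element
+// width matters but signedness does not.
+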
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgatherei16_vv_u8mf8(vuint8mf8_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgatherei16_vv_u8mf4(vuint8mf4_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgatherei16_vv_u8mf2(vuint8mf2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgatherei16_vv_u8m1(vuint8m1_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgatherei16_vv_u8m2(vuint8m2_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgatherei16_vv_u8m4(vuint8m4_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgatherei16_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgatherei16_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgatherei16_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgatherei16_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgatherei16_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgatherei16_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgatherei16_vv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgatherei16_vv_u32m1(vuint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgatherei16_vv_u32m2(vuint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgatherei16_vv_u32m4(vuint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgatherei16_vv_u32m8(vuint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgatherei16_vv_u64m1(vuint64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgatherei16_vv_u64m2(vuint64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgatherei16_vv_u64m4(vuint64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgatherei16_vv_u64m8(vuint64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u64m8(op1, op2, vl);
+}
+
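+// Floating-point data types reuse the same integer index types; only the
+// gathered element type changes (nxv*f32/nxv*f64 overloads below).
+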
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2(vfloat32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgatherei16_vv_f32m1(vfloat32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgatherei16_vv_f32m2(vfloat32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i32(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgatherei16_vv_f32m4(vfloat32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i32(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgatherei16_vv_f32m8(vfloat32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i32(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgatherei16_vv_f64m1(vfloat64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i32(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgatherei16_vv_f64m2(vfloat64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i32(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i32(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f64m8(op1, op2, vl);
+}
+
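+// The masked (_m) variants take a mask and a maskedoff operand ahead of the
+// regular operands; elements whose mask bit is clear take the corresponding
+// maskedoff element, which appears as the merge (first) operand of the
+// .mask intrinsics in the checks below.
+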
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vuint8mf8_t index,
+ size_t vl) {
+ return vrgather_vv_i8mf8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8mf8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vuint8mf4_t index,
+ size_t vl) {
+ return vrgather_vv_i8mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vuint8mf2_t index,
+ size_t vl) {
+ return vrgather_vv_i8mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vuint8m1_t index, size_t vl) {
+ return vrgather_vv_i8m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vuint8m2_t index, size_t vl) {
+ return vrgather_vv_i8m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vuint8m4_t index, size_t vl) {
+ return vrgather_vv_i8m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vuint8m8_t index, size_t vl) {
+ return vrgather_vv_i8m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i8m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vuint16mf4_t index,
+ size_t vl) {
+ return vrgather_vv_i16mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_i16mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vuint16mf2_t index,
+ size_t vl) {
+ return vrgather_vv_i16mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_i16mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vuint16m1_t index,
+ size_t vl) {
+ return vrgather_vv_i16m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vuint16m2_t index,
+ size_t vl) {
+ return vrgather_vv_i16m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vuint16m4_t index,
+ size_t vl) {
+ return vrgather_vv_i16m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vuint16m8_t index,
+ size_t vl) {
+ return vrgather_vv_i16m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i16m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather_vv_i32mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_i32mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather_vv_i32m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather_vv_i32m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather_vv_i32m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather_vv_i32m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i32m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather_vv_i64m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i64m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather_vv_i64m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i64m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather_vv_i64m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i64m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather_vv_i64m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_i64m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t index,
+ size_t vl) {
+ return vrgather_vv_u8mf8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8mf8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t index,
+ size_t vl) {
+ return vrgather_vv_u8mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t index,
+ size_t vl) {
+ return vrgather_vv_u8mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t index,
+ size_t vl) {
+ return vrgather_vv_u8m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t index,
+ size_t vl) {
+ return vrgather_vv_u8m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t index,
+ size_t vl) {
+ return vrgather_vv_u8m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t index,
+ size_t vl) {
+ return vrgather_vv_u8m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u8m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t index,
+ size_t vl) {
+ return vrgather_vv_u16mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_u16mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t index,
+ size_t vl) {
+ return vrgather_vv_u16mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_u16mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t index,
+ size_t vl) {
+ return vrgather_vv_u16m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u16m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t index,
+ size_t vl) {
+ return vrgather_vv_u16m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u16m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t index,
+ size_t vl) {
+ return vrgather_vv_u16m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u16m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t index,
+ size_t vl) {
+ return vrgather_vv_u16m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u16m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather_vv_u32mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_u32mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather_vv_u32m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u32m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather_vv_u32m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u32m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather_vv_u32m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u32m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather_vv_u32m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u32m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather_vv_u64m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u64m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather_vv_u64m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u64m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather_vv_u64m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u64m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather_vv_u64m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, size_t index, size_t vl) {
+ return vrgather_vx_u64m8_m(mask, maskedoff, op1, index, vl);
+}
+
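+// Note: for the floating-point variants below, vrgather_vv still takes an
+// unsigned integer index vector of matching SEW and LMUL (e.g. vuint32mf2_t
+// for f32mf2), and vrgather_vx takes a size_t index, as in the integer
+// tests above.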
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i32> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vuint32mf2_t index,
+ size_t vl) {
+ return vrgather_vv_f32mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f32mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i32> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vuint32m1_t index,
+ size_t vl) {
+ return vrgather_vv_f32m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f32m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i32> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vuint32m2_t index,
+ size_t vl) {
+ return vrgather_vv_f32m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f32m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i32> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vuint32m4_t index,
+ size_t vl) {
+ return vrgather_vv_f32m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f32m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i32> [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vuint32m8_t index,
+ size_t vl) {
+ return vrgather_vv_f32m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f32m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i64> [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vuint64m1_t index,
+ size_t vl) {
+ return vrgather_vv_f64m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f64m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i64> [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vuint64m2_t index,
+ size_t vl) {
+ return vrgather_vv_f64m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f64m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i64> [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vuint64m4_t index,
+ size_t vl) {
+ return vrgather_vv_f64m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f64m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i64> [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vuint64m8_t index,
+ size_t vl) {
+ return vrgather_vv_f64m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgather_vx_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i32 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], i64 [[INDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, size_t index,
+ size_t vl) {
+ return vrgather_vx_f64m8_m(mask, maskedoff, op1, index, vl);
+}
+
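+// Note: unlike vrgather, whose index elements match the data SEW,
+// vrgatherei16 per the RVV spec always reads its indices from a vector of
+// 16-bit unsigned elements (the vuint16* op2 operands below), so the index
+// register group scales with the ratio 16/SEW. For SEW=8 that is why op2
+// runs from vuint16mf4_t up to vuint16m8_t, and why no i8m8 variant is
+// tested: its index vector would require LMUL=16.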
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
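+// Note: the unsigned tests below lower to the same sign-agnostic
+// @llvm.riscv.vrgatherei16.vv.mask.* intrinsics as the signed tests above;
+// only the C-level element types differ.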
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask,
+ vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask,
+ vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask,
+ vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask,
+ vfloat32mf2_t maskedoff,
+ vfloat32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i32(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask,
+ vfloat32m1_t maskedoff,
+ vfloat32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i32(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask,
+ vfloat32m2_t maskedoff,
+ vfloat32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i32(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
+ vfloat32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i32(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
+ vfloat32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i32(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask,
+ vfloat64m1_t maskedoff,
+ vfloat64m1_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask,
+ vfloat64m2_t maskedoff,
+ vfloat64m2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask,
+ vfloat64m4_t maskedoff,
+ vfloat64m4_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrgatherei16_vv_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i32(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
+ vfloat64m8_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vrgatherei16_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
new file mode 100644
index 000000000000..51f905001e57
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vrsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vrsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vrsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vrsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vrsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vrsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vrsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vrsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vrsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
new file mode 100644
index 000000000000..f81ebc026902
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsadd_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsadd_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsadd_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsadd_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsadd_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsadd_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsadd_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsadd_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsadd_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsadd_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsadd_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsadd_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsadd_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsadd_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsadd_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsadd_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsadd_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsadd_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsadd_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsadd_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsadd_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsadd_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsadd_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsadd_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsadd_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsadd_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsaddu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsaddu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsaddu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsaddu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsaddu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsaddu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsaddu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vsaddu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vsaddu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsaddu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsaddu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsaddu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsaddu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vsaddu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsaddu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsaddu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsaddu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsaddu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsaddu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsaddu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsaddu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsaddu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsaddu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsaddu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsaddu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsaddu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vsadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vsadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vsadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
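+// The tests below cover vsaddu, the unsigned saturating add. As with vsadd
+// above, each masked (_m) variant lowers to @llvm.riscv.vsaddu.mask.* and
+// passes the inactive-element source (maskedoff), both operands, the mask,
+// and the vector length vl.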
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vsaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vsaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vsaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vsaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vsaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vsaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vsaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vsaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vsaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vsaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vsaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vsaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vsaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vsaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vsaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vsaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vsaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vsaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vsaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vsaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vsaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsaddu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
new file mode 100644
index 000000000000..b8f051b86517
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsbc.c
@@ -0,0 +1,1328 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
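+// The tests below exercise the unmasked vsbc intrinsics. Per the RVV
+// specification, vsbc computes a subtract-with-borrow lane-wise, roughly:
+//   vd[i] = op1[i] - op2[i] - (borrowin[i] ? 1 : 0)
+// The borrow-in mask is a data operand (always v0 in the emitted assembly),
+// not a predication mask, which is why no masked (_m) variants appear here.
+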
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsbc_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vvm_i8mf8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsbc_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i8mf8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsbc_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vvm_i8mf4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsbc_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i8mf4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsbc_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vvm_i8mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsbc_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i8mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsbc_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc_vvm_i8m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsbc_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i8m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsbc_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc_vvm_i8m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsbc_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i8m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsbc_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc_vvm_i8m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsbc_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i8m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsbc_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vsbc_vvm_i8m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsbc_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i8m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsbc_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vvm_i16mf4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsbc_vxm_i16mf4(vint16mf4_t op1, int16_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vxm_i16mf4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsbc_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vvm_i16mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsbc_vxm_i16mf2(vint16mf2_t op1, int16_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vxm_i16mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsbc_vvm_i16m1(vint16m1_t op1, vint16m1_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vvm_i16m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsbc_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i16m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsbc_vvm_i16m2(vint16m2_t op1, vint16m2_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vvm_i16m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsbc_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i16m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsbc_vvm_i16m4(vint16m4_t op1, vint16m4_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc_vvm_i16m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsbc_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i16m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsbc_vvm_i16m8(vint16m8_t op1, vint16m8_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vsbc_vvm_i16m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsbc_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i16m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsbc_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vvm_i32mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsbc_vxm_i32mf2(vint32mf2_t op1, int32_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vxm_i32mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsbc_vvm_i32m1(vint32m1_t op1, vint32m1_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vvm_i32m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsbc_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i32m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsbc_vvm_i32m2(vint32m2_t op1, vint32m2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vvm_i32m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsbc_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i32m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsbc_vvm_i32m4(vint32m4_t op1, vint32m4_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vvm_i32m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsbc_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i32m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsbc_vvm_i32m8(vint32m8_t op1, vint32m8_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc_vvm_i32m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsbc_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i32m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsbc_vvm_i64m1(vint64m1_t op1, vint64m1_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vvm_i64m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsbc_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i64m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsbc_vvm_i64m2(vint64m2_t op1, vint64m2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vvm_i64m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsbc_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i64m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsbc_vvm_i64m4(vint64m4_t op1, vint64m4_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vvm_i64m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsbc_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i64m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsbc_vvm_i64m8(vint64m8_t op1, vint64m8_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vvm_i64m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsbc_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_i64m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsbc_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vvm_u8mf8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsbc_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vxm_u8mf8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsbc_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vvm_u8mf4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsbc_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vxm_u8mf4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsbc_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vvm_u8mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsbc_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vxm_u8mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsbc_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc_vvm_u8m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsbc_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_u8m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsbc_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc_vvm_u8m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsbc_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_u8m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsbc_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc_vvm_u8m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsbc_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_u8m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsbc_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vsbc_vvm_u8m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsbc_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t borrowin,
+ size_t vl) {
+ return vsbc_vxm_u8m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsbc_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vvm_u16mf4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsbc_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vxm_u16mf4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsbc_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vvm_u16mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsbc_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vxm_u16mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsbc_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vvm_u16m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsbc_vxm_u16m1(vuint16m1_t op1, uint16_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vxm_u16m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsbc_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vvm_u16m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsbc_vxm_u16m2(vuint16m2_t op1, uint16_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vxm_u16m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsbc_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc_vvm_u16m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsbc_vxm_u16m4(vuint16m4_t op1, uint16_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc_vxm_u16m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsbc_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vsbc_vvm_u16m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsbc_vxm_u16m8(vuint16m8_t op1, uint16_t op2,
+ vbool2_t borrowin, size_t vl) {
+ return vsbc_vxm_u16m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsbc_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vvm_u32mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsbc_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vxm_u32mf2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsbc_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vvm_u32m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsbc_vxm_u32m1(vuint32m1_t op1, uint32_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vxm_u32m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsbc_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vvm_u32m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsbc_vxm_u32m2(vuint32m2_t op1, uint32_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vxm_u32m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsbc_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vvm_u32m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsbc_vxm_u32m4(vuint32m4_t op1, uint32_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vxm_u32m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsbc_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc_vvm_u32m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsbc_vxm_u32m8(vuint32m8_t op1, uint32_t op2,
+ vbool4_t borrowin, size_t vl) {
+ return vsbc_vxm_u32m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsbc_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vvm_u64m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsbc_vxm_u64m1(vuint64m1_t op1, uint64_t op2,
+ vbool64_t borrowin, size_t vl) {
+ return vsbc_vxm_u64m1(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsbc_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vvm_u64m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsbc_vxm_u64m2(vuint64m2_t op1, uint64_t op2,
+ vbool32_t borrowin, size_t vl) {
+ return vsbc_vxm_u64m2(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsbc_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vvm_u64m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsbc_vxm_u64m4(vuint64m4_t op1, uint64_t op2,
+ vbool16_t borrowin, size_t vl) {
+ return vsbc_vxm_u64m4(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vvm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vvm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsbc_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vvm_u64m8(op1, op2, borrowin, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsbc_vxm_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsbc_vxm_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[BORROWIN:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsbc_vxm_u64m8(vuint64m8_t op1, uint64_t op2,
+ vbool8_t borrowin, size_t vl) {
+ return vsbc_vxm_u64m8(op1, op2, borrowin, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
index c74a26d4551b..facd1aa8c480 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
@@ -1,10 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN: -Werror -Wall -o - %s >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
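// (Editorial note: adding -S makes this RUN line actually reach the backend,
// so the ASM-NOT check above is no longer vacuous; +m is presumably required
// because scalable-vector codegen can emit multiply instructions.)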
#include <riscv_vector.h>
@@ -1704,3 +1705,115 @@ void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size
void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
return vse64_v_f64m8_m(mask, base, value, vl);
}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv64i1.i32(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv64i1.i64(<vscale x 64 x i1> [[VALUE:%.*]], <vscale x 64 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) {
+ return vse1_v_b1(base, value, vl);
+}
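+
+// Note (editorial): per the RVV v0.10 spec, vse1.v stores a mask register to
+// memory as a packed bit array (effective EEW=8, EMUL=1, writing ceil(vl/8)
+// bytes), which is why every vse1 overload takes a plain uint8_t *base.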
+
+// CHECK-RV32-LABEL: @test_vse1_v_b2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv32i1.i32(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv32i1.i64(<vscale x 32 x i1> [[VALUE:%.*]], <vscale x 32 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) {
+ return vse1_v_b2(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv16i1.i32(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv16i1.i64(<vscale x 16 x i1> [[VALUE:%.*]], <vscale x 16 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) {
+ return vse1_v_b4(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv8i1.i32(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv8i1.i64(<vscale x 8 x i1> [[VALUE:%.*]], <vscale x 8 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
+ return vse1_v_b8(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv4i1.i32(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv4i1.i64(<vscale x 4 x i1> [[VALUE:%.*]], <vscale x 4 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
+ return vse1_v_b16(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv2i1.i32(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv2i1.i64(<vscale x 2 x i1> [[VALUE:%.*]], <vscale x 2 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
+ return vse1_v_b32(base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse1_v_b64(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse1.nxv1i1.i32(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse1_v_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse1.nxv1i1.i64(<vscale x 1 x i1> [[VALUE:%.*]], <vscale x 1 x i1>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
+ return vse1_v_b64(base, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
index e837653304a7..5d68209d88a7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-
+// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -emit-llvm -o - %s \
// RUN: | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -emit-llvm -o - %s \
// RUN: | FileCheck --check-prefix=CHECK-RV64 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -Werror -Wall -o - \
-// RUN: %s > /dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
index 2d2dd6e53174..447ca0595f0b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
@@ -1,11 +1,11 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-
+// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -emit-llvm -o - %s \
// RUN: | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -emit-llvm -o - %s \
// RUN: | FileCheck --check-prefix=CHECK-RV64 %s
-// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -Werror -Wall -o - \
-// RUN: %s > /dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
// ASM-NOT: warning
#include <riscv_vector.h>
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
new file mode 100644
index 000000000000..c2634603e65f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
@@ -0,0 +1,820 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
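+
+// Note (editorial): vsext_vfN sign-extends each element to N times its
+// width, so the destination has N times the SEW (and N times the LMUL) of
+// the source, e.g. vsext_vf2_i16mf4 widens vint8mf8_t to vint16mf4_t.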
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
+ return vsext_vf2_i16mf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
+ return vsext_vf2_i16mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
+ return vsext_vf2_i16m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
+ return vsext_vf2_i16m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
+ return vsext_vf2_i16m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
+ return vsext_vf2_i16m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
+ return vsext_vf4_i32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
+ return vsext_vf4_i32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
+ return vsext_vf4_i32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
+ return vsext_vf4_i32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
+ return vsext_vf4_i32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
+ return vsext_vf8_i64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
+ return vsext_vf8_i64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
+ return vsext_vf8_i64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
+ return vsext_vf8_i64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
+ return vsext_vf2_i32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
+ return vsext_vf2_i32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
+ return vsext_vf2_i32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
+ return vsext_vf2_i32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
+ return vsext_vf2_i32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
+ return vsext_vf4_i64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
+ return vsext_vf4_i64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
+ return vsext_vf4_i64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
+ return vsext_vf4_i64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
+ return vsext_vf2_i64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
+ return vsext_vf2_i64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
+ return vsext_vf2_i64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
+ return vsext_vf2_i64m8(op1, vl);
+}
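+
+// Note (editorial): the _m variants below follow the common v0.10 masked
+// intrinsic shape (mask, maskedoff, op1, vl); elements whose mask bit is
+// clear are taken from maskedoff rather than computed.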
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, size_t vl) {
+ return vsext_vf2_i16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, size_t vl) {
+ return vsext_vf2_i16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, size_t vl) {
+ return vsext_vf2_i16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, size_t vl) {
+ return vsext_vf2_i16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, size_t vl) {
+ return vsext_vf2_i16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, size_t vl) {
+ return vsext_vf2_i16m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint8mf8_t op1, size_t vl) {
+ return vsext_vf4_i32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint8mf4_t op1, size_t vl) {
+ return vsext_vf4_i32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint8mf2_t op1, size_t vl) {
+ return vsext_vf4_i32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint8m1_t op1, size_t vl) {
+ return vsext_vf4_i32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint8m2_t op1, size_t vl) {
+ return vsext_vf4_i32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint8mf8_t op1, size_t vl) {
+ return vsext_vf8_i64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint8mf4_t op1, size_t vl) {
+ return vsext_vf8_i64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint8mf2_t op1, size_t vl) {
+ return vsext_vf8_i64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint8m1_t op1, size_t vl) {
+ return vsext_vf8_i64m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, size_t vl) {
+ return vsext_vf2_i32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, size_t vl) {
+ return vsext_vf2_i32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, size_t vl) {
+ return vsext_vf2_i32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, size_t vl) {
+ return vsext_vf2_i32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, size_t vl) {
+ return vsext_vf2_i32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint16mf4_t op1, size_t vl) {
+ return vsext_vf4_i64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint16mf2_t op1, size_t vl) {
+ return vsext_vf4_i64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint16m1_t op1, size_t vl) {
+ return vsext_vf4_i64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint16m2_t op1, size_t vl) {
+ return vsext_vf4_i64m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, size_t vl) {
+ return vsext_vf2_i64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, size_t vl) {
+ return vsext_vf2_i64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, size_t vl) {
+ return vsext_vf2_i64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, size_t vl) {
+ return vsext_vf2_i64m8_m(mask, maskedoff, op1, vl);
+}
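+
+// Note: vsext_vf2/vf4/vf8 sign-extend each source element to 2x/4x/8x its
+// width, so the destination operates at the source LMUL scaled by the same
+// factor (e.g. vsext_vf8 widens an i8mf8 operand to an i64m1 result).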
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
new file mode 100644
index 000000000000..c35a3ffa35a8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
@@ -0,0 +1,1348 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
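+// (The third RUN line above compiles to assembly with -Werror -Wall; the
+// ASM-NOT check verifies that no warning is emitted for any intrinsic call
+// in this file.)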
+#include <riscv_vector.h>
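+
+// The tests below exercise vslide1down: per the RVV specification,
+// result[i] = src[i+1] for 0 <= i < vl-1, and the scalar `value` is written
+// into element vl-1. The type suffix encodes SEW and LMUL (e.g. i8mf8 is
+// 8-bit elements at LMUL=1/8).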
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i32(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslide1down_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8mf8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i32(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslide1down_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8mf4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i32(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslide1down_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i32(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslide1down_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i32(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslide1down_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i32(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslide1down_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i32(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslide1down_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslide1down_vx_i16mf4(vint16mf4_t src, int16_t value,
+ size_t vl) {
+ return vslide1down_vx_i16mf4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslide1down_vx_i16mf2(vint16mf2_t src, int16_t value,
+ size_t vl) {
+ return vslide1down_vx_i16mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslide1down_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
+ return vslide1down_vx_i16m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslide1down_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
+ return vslide1down_vx_i16m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslide1down_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
+ return vslide1down_vx_i16m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i32(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslide1down_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
+ return vslide1down_vx_i16m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslide1down_vx_i32mf2(vint32mf2_t src, int32_t value,
+ size_t vl) {
+ return vslide1down_vx_i32mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslide1down_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
+ return vslide1down_vx_i32m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslide1down_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
+ return vslide1down_vx_i32m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslide1down_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
+ return vslide1down_vx_i32m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslide1down_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
+ return vslide1down_vx_i32m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslide1down_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
+ return vslide1down_vx_i64m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslide1down_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
+ return vslide1down_vx_i64m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslide1down_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
+ return vslide1down_vx_i64m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslide1down_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
+ return vslide1down_vx_i64m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i32(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslide1down_vx_u8mf8(vuint8mf8_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8mf8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i32(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslide1down_vx_u8mf4(vuint8mf4_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8mf4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i32(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslide1down_vx_u8mf2(vuint8mf2_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i32(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslide1down_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
+ return vslide1down_vx_u8m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i32(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslide1down_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
+ return vslide1down_vx_u8m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i32(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslide1down_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
+ return vslide1down_vx_u8m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i32(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslide1down_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
+ return vslide1down_vx_u8m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslide1down_vx_u16mf4(vuint16mf4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16mf4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslide1down_vx_u16mf2(vuint16mf2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslide1down_vx_u16m1(vuint16m1_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslide1down_vx_u16m2(vuint16m2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslide1down_vx_u16m4(vuint16m4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i32(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslide1down_vx_u16m8(vuint16m8_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslide1down_vx_u32mf2(vuint32mf2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslide1down_vx_u32m1(vuint32m1_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslide1down_vx_u32m2(vuint32m2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslide1down_vx_u32m4(vuint32m4_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslide1down_vx_u32m8(vuint32m8_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslide1down_vx_u64m1(vuint64m1_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down_vx_u64m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslide1down_vx_u64m2(vuint64m2_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down_vx_u64m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down_vx_u64m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down_vx_u64m8(src, value, vl);
+}
+
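+// The _m variants below take two extra leading arguments: a mask selecting
+// the active elements and a maskedoff vector that supplies the result for
+// the inactive ones (it becomes the first operand of the masked intrinsic
+// in the generated IR). The mask type vboolN_t pairs with the operand type
+// whose SEW/LMUL ratio equals N, e.g. vbool64_t with i8mf8.
+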
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t src, int8_t value,
+ size_t vl) {
+ return vslide1down_vx_i8mf8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t src, int8_t value,
+ size_t vl) {
+ return vslide1down_vx_i8mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t src, int8_t value,
+ size_t vl) {
+ return vslide1down_vx_i8mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t src, int8_t value, size_t vl) {
+ return vslide1down_vx_i8m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t src, int16_t value,
+ size_t vl) {
+ return vslide1down_vx_i16mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t src, int16_t value,
+ size_t vl) {
+ return vslide1down_vx_i16mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t src, int16_t value,
+ size_t vl) {
+ return vslide1down_vx_i16m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t src, int16_t value,
+ size_t vl) {
+ return vslide1down_vx_i16m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t src, int16_t value,
+ size_t vl) {
+ return vslide1down_vx_i16m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t src, int16_t value,
+ size_t vl) {
+ return vslide1down_vx_i16m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t src, int32_t value,
+ size_t vl) {
+ return vslide1down_vx_i32mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t src, int32_t value,
+ size_t vl) {
+ return vslide1down_vx_i32m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t src, int32_t value,
+ size_t vl) {
+ return vslide1down_vx_i32m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t src, int32_t value,
+ size_t vl) {
+ return vslide1down_vx_i32m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t src, int32_t value,
+ size_t vl) {
+ return vslide1down_vx_i32m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t src, int64_t value,
+ size_t vl) {
+ return vslide1down_vx_i64m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t src, int64_t value,
+ size_t vl) {
+ return vslide1down_vx_i64m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t src, int64_t value,
+ size_t vl) {
+ return vslide1down_vx_i64m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t src, int64_t value,
+ size_t vl) {
+ return vslide1down_vx_i64m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8mf8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t src, uint8_t value,
+ size_t vl) {
+ return vslide1down_vx_u8m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask,
+ vuint16mf4_t maskedoff,
+ vuint16mf4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask,
+ vuint16mf2_t maskedoff,
+ vuint16mf2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t src, uint16_t value,
+ size_t vl) {
+ return vslide1down_vx_u16m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask,
+ vuint32mf2_t maskedoff,
+ vuint32mf2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t src, uint32_t value,
+ size_t vl) {
+ return vslide1down_vx_u32m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down_vx_u64m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down_vx_u64m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down_vx_u64m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1down_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t src, uint64_t value,
+ size_t vl) {
+ return vslide1down_vx_u64m8_m(mask, maskedoff, src, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
new file mode 100644
index 000000000000..b1334da4ca1f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
@@ -0,0 +1,1323 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i32(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslide1up_vx_i8mf8(vint8mf8_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8mf8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i32(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslide1up_vx_i8mf4(vint8mf4_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8mf4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i32(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslide1up_vx_i8mf2(vint8mf2_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i32(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslide1up_vx_i8m1(vint8m1_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i32(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslide1up_vx_i8m2(vint8m2_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i32(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslide1up_vx_i8m4(vint8m4_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i32(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslide1up_vx_i8m8(vint8m8_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslide1up_vx_i16mf4(vint16mf4_t src, int16_t value,
+ size_t vl) {
+ return vslide1up_vx_i16mf4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslide1up_vx_i16mf2(vint16mf2_t src, int16_t value,
+ size_t vl) {
+ return vslide1up_vx_i16mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslide1up_vx_i16m1(vint16m1_t src, int16_t value, size_t vl) {
+ return vslide1up_vx_i16m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslide1up_vx_i16m2(vint16m2_t src, int16_t value, size_t vl) {
+ return vslide1up_vx_i16m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslide1up_vx_i16m4(vint16m4_t src, int16_t value, size_t vl) {
+ return vslide1up_vx_i16m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i32(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslide1up_vx_i16m8(vint16m8_t src, int16_t value, size_t vl) {
+ return vslide1up_vx_i16m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslide1up_vx_i32mf2(vint32mf2_t src, int32_t value,
+ size_t vl) {
+ return vslide1up_vx_i32mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslide1up_vx_i32m1(vint32m1_t src, int32_t value, size_t vl) {
+ return vslide1up_vx_i32m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslide1up_vx_i32m2(vint32m2_t src, int32_t value, size_t vl) {
+ return vslide1up_vx_i32m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslide1up_vx_i32m4(vint32m4_t src, int32_t value, size_t vl) {
+ return vslide1up_vx_i32m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslide1up_vx_i32m8(vint32m8_t src, int32_t value, size_t vl) {
+ return vslide1up_vx_i32m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslide1up_vx_i64m1(vint64m1_t src, int64_t value, size_t vl) {
+ return vslide1up_vx_i64m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslide1up_vx_i64m2(vint64m2_t src, int64_t value, size_t vl) {
+ return vslide1up_vx_i64m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslide1up_vx_i64m4(vint64m4_t src, int64_t value, size_t vl) {
+ return vslide1up_vx_i64m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslide1up_vx_i64m8(vint64m8_t src, int64_t value, size_t vl) {
+ return vslide1up_vx_i64m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i32(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslide1up_vx_u8mf8(vuint8mf8_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8mf8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i32(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslide1up_vx_u8mf4(vuint8mf4_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8mf4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i32(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslide1up_vx_u8mf2(vuint8mf2_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i32(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslide1up_vx_u8m1(vuint8m1_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i32(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslide1up_vx_u8m2(vuint8m2_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i32(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslide1up_vx_u8m4(vuint8m4_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i32(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.nxv64i8.i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslide1up_vx_u8m8(vuint8m8_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i32(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslide1up_vx_u16mf4(vuint16mf4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16mf4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i32(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslide1up_vx_u16mf2(vuint16mf2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i32(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslide1up_vx_u16m1(vuint16m1_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i32(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslide1up_vx_u16m2(vuint16m2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i32(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslide1up_vx_u16m4(vuint16m4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i32(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.nxv32i16.i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslide1up_vx_u16m8(vuint16m8_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i32(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslide1up_vx_u32mf2(vuint32mf2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32mf2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i32(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslide1up_vx_u32m1(vuint32m1_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i32(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslide1up_vx_u32m2(vuint32m2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i32(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslide1up_vx_u32m4(vuint32m4_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i32(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.nxv16i32.i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslide1up_vx_u32m8(vuint32m8_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i32(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslide1up_vx_u64m1(vuint64m1_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up_vx_u64m1(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i32(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.nxv2i64.i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslide1up_vx_u64m2(vuint64m2_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up_vx_u64m2(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i32(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up_vx_u64m4(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i32(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up_vx_u64m8(src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8mf8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t src, int8_t value, size_t vl) {
+ return vslide1up_vx_i8m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t src, int16_t value,
+ size_t vl) {
+ return vslide1up_vx_i16mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t src, int16_t value,
+ size_t vl) {
+ return vslide1up_vx_i16mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t src, int16_t value, size_t vl) {
+ return vslide1up_vx_i16m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t src, int16_t value, size_t vl) {
+ return vslide1up_vx_i16m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t src, int16_t value, size_t vl) {
+ return vslide1up_vx_i16m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t src, int16_t value, size_t vl) {
+ return vslide1up_vx_i16m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t src, int32_t value,
+ size_t vl) {
+ return vslide1up_vx_i32mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t src, int32_t value, size_t vl) {
+ return vslide1up_vx_i32m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t src, int32_t value, size_t vl) {
+ return vslide1up_vx_i32m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t src, int32_t value, size_t vl) {
+ return vslide1up_vx_i32m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t src, int32_t value, size_t vl) {
+ return vslide1up_vx_i32m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t src, int64_t value, size_t vl) {
+ return vslide1up_vx_i64m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t src, int64_t value, size_t vl) {
+ return vslide1up_vx_i64m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t src, int64_t value, size_t vl) {
+ return vslide1up_vx_i64m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t src, int64_t value, size_t vl) {
+ return vslide1up_vx_i64m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t src, uint8_t value,
+ size_t vl) {
+ return vslide1up_vx_u8mf8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t src, uint8_t value,
+ size_t vl) {
+ return vslide1up_vx_u8mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t src, uint8_t value,
+ size_t vl) {
+ return vslide1up_vx_u8mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i8 [[VALUE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t src, uint8_t value, size_t vl) {
+ return vslide1up_vx_u8m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16mf4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t src, uint16_t value,
+ size_t vl) {
+ return vslide1up_vx_u16m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32mf2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[VALUE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t src, uint32_t value,
+ size_t vl) {
+ return vslide1up_vx_u32m8_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up_vx_u64m1_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up_vx_u64m2_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up_vx_u64m4_m(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslide1up_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VALUE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t src, uint64_t value,
+ size_t vl) {
+ return vslide1up_vx_u64m8_m(mask, maskedoff, src, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
new file mode 100644
index 000000000000..46d739ec3401
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
@@ -0,0 +1,1646 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i8mf8(dst, src, offset, vl);
+}
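+
+// The remaining unmasked tests repeat this shape across the full type grid:
+// SEW 8/16/32/64 (plus f32/f64) crossed with LMUL mf8 through m8, each
+// mapping a vintN/vuintN/vfloatN C type onto the corresponding scalable
+// <vscale x N x ...> LLVM type seen in the CHECK lines.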
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i8mf4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i8mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i8m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i8m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i8m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i8m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i16mf4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i16mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i16m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i16m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i16m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i16m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i32mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i32m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i32m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i32m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i32m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i64m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i64m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i64m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i64m8(dst, src, offset, vl);
+}
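+
+// The unsigned tests below exercise the same lowering: signed and unsigned
+// element types share one integer intrinsic, so e.g. u8mf8 selects the same
+// @llvm.riscv.vslidedown.nxv1i8.* call as i8mf8 above.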
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u8mf8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u8mf4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u8mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u8m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u8m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u8m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u8m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u16mf4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u16mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u16m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u16m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u16m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u16m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u32mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u32m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u32m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u32m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u32m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u64m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u64m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u64m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_u64m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i32(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f32mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f32m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i32(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f32m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i32(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f32m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i32(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f32m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f64m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i32(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f64m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i32(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f64m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i32(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_f64m8(dst, src, offset, vl);
+}
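+
+// The masked variants follow. They prepend a vbool<SEW/LMUL>_t mask
+// (vbool64_t for i8mf8 down through vbool1_t for i8m8) and are expected to
+// select the .mask. flavor of the intrinsic, with the mask operand placed
+// after the offset, e.g. (sketch):
+//   vint8m1_t r = vslidedown_vx_i8m1_m(mask, dst, src, offset, vl);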
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
+ vint8mf8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i8mf8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
+ vint8mf4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i8mf4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
+ vint8mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i8mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i8m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i8m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i8m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
+ size_t offset, size_t vl) {
+ return vslidedown_vx_i8m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
+ vint16mf4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i16mf4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
+ vint16mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i16mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i16m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
+ vint16m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i16m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
+ vint16m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i16m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
+ vint16m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i16m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
+ vint32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i32mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i32m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
+ vint32m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i32m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
+ vint32m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i32m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
+ vint32m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i32m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i64m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
+ vint64m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i64m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
+ vint64m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i64m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
+ vint64m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_i64m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
+ vuint8mf8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u8mf8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
+ vuint8mf4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u8mf4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
+ vuint8mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u8mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
+ vuint8m2_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
+ vuint8m4_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
+ vuint8m8_t src, size_t offset, size_t vl) {
+ return vslidedown_vx_u8m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
+ vuint16mf4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u16mf4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
+ vuint16mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u16mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u16m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
+ vuint16m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u16m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
+ vuint16m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u16m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
+ vuint16m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u16m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
+ vuint32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u32mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u32m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
+ vuint32m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u32m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
+ vuint32m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u32m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
+ vuint32m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u32m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u64m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
+ vuint64m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u64m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
+ vuint64m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u64m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
+ vuint64m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_u64m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i32(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
+ vfloat32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f32mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f32m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i32(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
+ vfloat32m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f32m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i32(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
+ vfloat32m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f32m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i32(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
+ vfloat32m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f32m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f64m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i32(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
+ vfloat64m2_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f64m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i32(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
+ vfloat64m4_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f64m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslidedown_vx_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i32(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
+ vfloat64m8_t src, size_t offset,
+ size_t vl) {
+ return vslidedown_vx_f64m8_m(mask, dst, src, offset, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
new file mode 100644
index 000000000000..8c6b99e8617c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
@@ -0,0 +1,1631 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
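+// The unmasked vslideup_vx intrinsics below all have the same shape; as
+// a rough sketch of the semantics under test (per the vslideup.vx
+// definition in the RVV spec):
+//
+//   vint8m1_t r = vslideup_vx_i8m1(dst, src, offset, vl);
+//   // r[i] = dst[i]            for 0 <= i < offset
+//   // r[i] = src[i - offset]   for offset <= i < vl
+//
+// The paired CHECK-RV32/CHECK-RV64 blocks verify that `offset` and `vl`
+// lower to XLEN-wide integers: i32 on riscv32 and i64 on riscv64.
+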
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i8mf8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i8mf4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i8mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i8m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i8m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i8m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i8m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_i16mf4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_i16mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i16m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i16m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i16m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i16m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_i32mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i32m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i32m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i32m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i32m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i64m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i64m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i64m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i64m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u8mf8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u8mf4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u8mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u8m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u8m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u8m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u8m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u16mf4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u16mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u16m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u16m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u16m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u16m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u32mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u32m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u32m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u32m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u32m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u64m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u64m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u64m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_u64m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i32(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f32mf2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f32m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i32(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f32m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i32(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f32m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i32(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f32m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f64m1(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i32(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f64m2(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i32(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f64m4(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i32(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_f64m8(dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
+ vint8mf8_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i8mf8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
+ vint8mf4_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i8mf4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
+ vint8mf2_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i8mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_i8m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_i8m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_i8m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
+ size_t offset, size_t vl) {
+ return vslideup_vx_i8m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
+ vint16mf4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i16mf4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
+ vint16mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i16mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint16m1_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i16m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
+ vint16m2_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i16m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
+ vint16m4_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i16m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
+ vint16m8_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i16m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
+ vint32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_i32mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint32m1_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i32m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
+ vint32m2_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i32m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
+ vint32m4_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i32m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
+ vint32m8_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i32m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint64m1_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i64m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
+ vint64m2_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i64m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
+ vint64m4_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i64m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
+ vint64m8_t src, size_t offset, size_t vl) {
+ return vslideup_vx_i64m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i32(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
+ vuint8mf8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u8mf8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i32(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
+ vuint8mf4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u8mf4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i32(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
+ vuint8mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u8mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+ vuint8m1_t src, size_t offset, size_t vl) {
+ return vslideup_vx_u8m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i32(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
+ vuint8m2_t src, size_t offset, size_t vl) {
+ return vslideup_vx_u8m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i32(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
+ vuint8m4_t src, size_t offset, size_t vl) {
+ return vslideup_vx_u8m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i32(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
+ vuint8m8_t src, size_t offset, size_t vl) {
+ return vslideup_vx_u8m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i32(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
+ vuint16mf4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u16mf4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i32(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
+ vuint16mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u16mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint16m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u16m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i32(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
+ vuint16m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u16m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i32(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
+ vuint16m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u16m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i32(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
+ vuint16m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u16m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i32(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
+ vuint32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u32mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint32m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u32m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i32(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
+ vuint32m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u32m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i32(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
+ vuint32m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u32m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i32(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
+ vuint32m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u32m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint64m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u64m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i32(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
+ vuint64m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u64m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i32(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
+ vuint64m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u64m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i32(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
+ vuint64m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_u64m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i32(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
+ vfloat32mf2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f32mf2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+ vfloat32m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f32m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i32(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
+ vfloat32m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f32m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i32(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
+ vfloat32m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f32m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i32(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
+ vfloat32m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f32m8_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+ vfloat64m1_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f64m1_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i32(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
+ vfloat64m2_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f64m2_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i32(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
+ vfloat64m4_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f64m4_m(mask, dst, src, offset, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vslideup_vx_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i32(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i32 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
+ vfloat64m8_t src, size_t offset,
+ size_t vl) {
+ return vslideup_vx_f64m8_m(mask, dst, src, offset, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
new file mode 100644
index 000000000000..be5add398e65
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
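+
+// The tests below exercise both operand forms of the shift intrinsic:
+// vsll_vv_* takes a per-element vector shift amount, while vsll_vx_* takes a
+// single scalar shift amount of type size_t. The type suffix (e.g. i8mf8,
+// i32m4) encodes the element type and LMUL, and vl is the active vector
+// length passed through to the IR intrinsic.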
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll_vv_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll_vv_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll_vv_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll_vv_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll_vv_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll_vv_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll_vv_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll_vv_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll_vv_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll_vv_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll_vv_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll_vv_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll_vv_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll_vv_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll_vv_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll_vv_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll_vv_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll_vv_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll_vv_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll_vv_i64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m8(op1, shift, vl);
+}
+
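+// The unsigned (u8/u16/u32/u64) variants below lower to the same
+// @llvm.riscv.vsll intrinsics as the signed tests above; signedness is not
+// encoded in the IR vector types (e.g. <vscale x 1 x i8>).
+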
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll_vv_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll_vv_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll_vv_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll_vv_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll_vv_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll_vv_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll_vv_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll_vv_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll_vv_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll_vv_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll_vv_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll_vv_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll_vv_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll_vv_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll_vv_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll_vv_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll_vv_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll_vv_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll_vv_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll_vv_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m8(op1, shift, vl);
+}
+
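+// Masked variants follow. Each takes a mask (vboolN_t) and a maskedoff merge
+// operand, and lowers to the @llvm.riscv.vsll.mask.* intrinsic with the extra
+// merge and <vscale x N x i1> mask arguments visible in the CHECK lines below.
+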
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsll_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsll_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsll_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsll_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsll_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsll_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsll_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsll_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsll_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsll_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsll_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsll_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsll_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsll_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsll_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsll_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsll_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsll_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsll_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsll_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsll_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsll_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsll_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsll_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
new file mode 100644
index 000000000000..e5d709adc3c1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c
@@ -0,0 +1,1287 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsmul_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsmul_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsmul_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsmul_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsmul_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsmul_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsmul_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsmul_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsmul_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsmul_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsmul_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsmul_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsmul_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsmul_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsmul_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsmul_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsmul_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsmul_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsmul_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsmul_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsmul_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsmul_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsmul_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsmul_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsmul_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsmul_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsmul_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsmul_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsmul_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsmul_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsmul_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsmul_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsmul_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsmul_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsmul_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsmul_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsmul_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsmul_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsmul_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsmul_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsmul_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsmul_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsmul_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsmul_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsmul_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsmul_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsmul_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsmul_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vsmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vsmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vsmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsmul_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
new file mode 100644
index 000000000000..b9af7ca76eab
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
@@ -0,0 +1,6523 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsoxei8_v_i8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsoxei8_v_i8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsoxei8_v_i8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsoxei8_v_i8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsoxei8_v_i8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
+ size_t vl) {
+ return vsoxei8_v_i8m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
+ size_t vl) {
+ return vsoxei8_v_i8m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsoxei16_v_i8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsoxei16_v_i8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsoxei16_v_i8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsoxei16_v_i8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsoxei16_v_i8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
+ size_t vl) {
+ return vsoxei16_v_i8m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsoxei32_v_i8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsoxei32_v_i8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsoxei32_v_i8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsoxei32_v_i8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsoxei32_v_i8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsoxei64_v_i8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsoxei64_v_i8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsoxei64_v_i8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsoxei64_v_i8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsoxei8_v_i16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
+ size_t vl) {
+ return vsoxei8_v_i16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsoxei8_v_i16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsoxei8_v_i16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsoxei8_v_i16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value,
+ size_t vl) {
+ return vsoxei8_v_i16m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsoxei16_v_i16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei16_v_i16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsoxei16_v_i16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsoxei16_v_i16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsoxei16_v_i16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
+ size_t vl) {
+ return vsoxei16_v_i16m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsoxei32_v_i16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei32_v_i16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsoxei32_v_i16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsoxei32_v_i16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsoxei32_v_i16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsoxei64_v_i16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei64_v_i16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsoxei64_v_i16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsoxei64_v_i16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsoxei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsoxei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsoxei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsoxei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsoxei8_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsoxei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsoxei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsoxei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsoxei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsoxei16_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsoxei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsoxei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsoxei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsoxei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsoxei32_v_i32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsoxei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsoxei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsoxei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsoxei64_v_i32m4(base, bindex, value, vl);
+}
+
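+// For 64-bit data with narrower indices the index operand uses a smaller
+// register group, since EMUL(index) = (EEW/SEW) * LMUL; hence the fractional
+// vuint8mf8_t index for the i64m1 case below.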
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsoxei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsoxei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsoxei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsoxei8_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsoxei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsoxei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsoxei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsoxei16_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsoxei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsoxei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsoxei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsoxei32_v_i64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsoxei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsoxei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsoxei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsoxei64_v_i64m8(base, bindex, value, vl);
+}
+
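+// The unsigned-element tests that follow lower to the same llvm.riscv.vsoxei
+// intrinsics as the signed ones: the indexed store is sign-agnostic in IR.
+// Note that the EEW=64 index cases stop at u8m1 data, as the index group
+// would otherwise exceed m8.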
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
+ size_t vl) {
+ return vsoxei8_v_u8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsoxei8_v_u8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsoxei8_v_u8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsoxei8_v_u8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsoxei8_v_u8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
+ size_t vl) {
+ return vsoxei8_v_u8m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
+ size_t vl) {
+ return vsoxei8_v_u8m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei16_v_u8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei16_v_u8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsoxei16_v_u8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsoxei16_v_u8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsoxei16_v_u8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
+ size_t vl) {
+ return vsoxei16_v_u8m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei32_v_u8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsoxei32_v_u8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsoxei32_v_u8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsoxei32_v_u8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsoxei32_v_u8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
+ size_t vl) {
+ return vsoxei64_v_u8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsoxei64_v_u8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsoxei64_v_u8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsoxei64_v_u8m1(base, bindex, value, vl);
+}
+
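+// 16-bit data, index EEW from 8 through 64 bits. The RV32 and RV64 runs
+// differ only in the XLEN-typed vl operand (.i32 vs .i64 intrinsic suffix).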
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei8_v_u16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei8_v_u16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
+ size_t vl) {
+ return vsoxei8_v_u16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
+ size_t vl) {
+ return vsoxei8_v_u16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
+ size_t vl) {
+ return vsoxei8_v_u16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
+ size_t vl) {
+ return vsoxei8_v_u16m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei16_v_u16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei16_v_u16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei16_v_u16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei16_v_u16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei16_v_u16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsoxei16_v_u16m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei32_v_u16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei32_v_u16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei32_v_u16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei32_v_u16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei32_v_u16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei64_v_u16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei64_v_u16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei64_v_u16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei64_v_u16m2(base, bindex, value, vl);
+}
+
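+// Note: with 64-bit indices the index LMUL is 4x that of 16-bit data, so the
+// u16 coverage above stops at m2 (m4 data would need a nonexistent m16 index
+// register group). The same index-EEW sweep now repeats for 32-bit data.
+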
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vsoxei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
+ size_t vl) {
+ return vsoxei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
+ size_t vl) {
+ return vsoxei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
+ size_t vl) {
+ return vsoxei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei64_v_u32m4(base, bindex, value, vl);
+}
+
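+// Note: for 64-bit data the index EEW never exceeds the data EEW, so the index
+// LMUL shrinks instead -- e.g. ei8 with u64m1 data takes a vuint8mf8_t bindex
+// (1 * 8/64 = 1/8). The ei64/u32 block above stops at m4 for the same register
+// group limit noted earlier.
+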
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsoxei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vsoxei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
+ size_t vl) {
+ return vsoxei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
+ size_t vl) {
+ return vsoxei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei64_v_u64m8(base, bindex, value, vl);
+}
+
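+// Note: the floating-point variants below exercise the same lowering; only the
+// element type changes (i32 becomes float in the IR, and the base pointer is
+// bitcast to <vscale x N x float>*). Index handling is identical to the
+// integer cases.
+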
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
+ size_t vl) {
+ return vsoxei8_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsoxei8_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsoxei8_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsoxei8_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsoxei8_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei16_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsoxei16_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsoxei16_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsoxei16_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsoxei16_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei32_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsoxei32_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsoxei32_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsoxei32_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsoxei32_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei64_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsoxei64_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsoxei64_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsoxei64_v_f32m4(base, bindex, value, vl);
+}
+
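+// Note: the f64 cases mirror u64 with double elements in the IR
+// (<vscale x N x double>); data coverage starts at m1 because 64-bit elements
+// cannot use a fractional LMUL (LMUL >= SEW/ELEN with ELEN=64).
+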
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vsoxei8_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsoxei8_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsoxei8_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsoxei8_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei16_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsoxei16_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsoxei16_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsoxei16_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei32_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsoxei32_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsoxei32_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsoxei32_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vsoxei64_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsoxei64_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsoxei64_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsoxei64_v_f64m8(base, bindex, value, vl);
+}
+
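+// The tests below exercise the masked (_m) overloads. Two typing rules tie
+// the arguments together:
+// - the mask is a vbool<n>_t with n = SEW/LMUL of the data type, so
+//   vint8mf8_t (SEW 8, LMUL 1/8) pairs with vbool64_t while vint8m8_t
+//   pairs with vbool1_t;
+// - the index vector holds one index per data element, so its LMUL is the
+//   data LMUL scaled by EEW/SEW (8-bit data at m2 with 32-bit indices
+//   therefore takes a vuint32m8_t bindex).
+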
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsoxei8_v_i8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsoxei8_v_i8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsoxei8_v_i8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsoxei8_v_i8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsoxei8_v_i8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex,
+ vint8m4_t value, size_t vl) {
+ return vsoxei8_v_i8m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex,
+ vint8m8_t value, size_t vl) {
+ return vsoxei8_v_i8m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsoxei16_v_i8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsoxei16_v_i8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsoxei16_v_i8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsoxei16_v_i8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsoxei16_v_i8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
+ vint8m4_t value, size_t vl) {
+ return vsoxei16_v_i8m4_m(mask, base, bindex, value, vl);
+}
+
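+// Note that there is no vsoxei16_v_i8m8_m test: 8-bit data at LMUL 8 would
+// need a 16-bit index vector at LMUL 16, beyond the architectural maximum of
+// 8. The same limit caps the ei32 variants below at i8m2 and ei64 at i8m1.
+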
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsoxei32_v_i8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsoxei32_v_i8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsoxei32_v_i8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsoxei32_v_i8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsoxei32_v_i8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsoxei64_v_i8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsoxei64_v_i8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsoxei64_v_i8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsoxei64_v_i8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsoxei8_v_i16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei8_v_i16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsoxei8_v_i16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsoxei8_v_i16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsoxei8_v_i16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
+ vint16m8_t value, size_t vl) {
+ return vsoxei8_v_i16m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
+ vuint16mf4_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsoxei16_v_i16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
+ vuint16mf2_t bindex, vint16mf2_t value,
+ size_t vl) {
+ return vsoxei16_v_i16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsoxei16_v_i16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsoxei16_v_i16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsoxei16_v_i16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
+ vint16m8_t value, size_t vl) {
+ return vsoxei16_v_i16m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
+ vuint32mf2_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsoxei32_v_i16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei32_v_i16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsoxei32_v_i16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsoxei32_v_i16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsoxei32_v_i16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsoxei64_v_i16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsoxei64_v_i16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsoxei64_v_i16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsoxei64_v_i16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsoxei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsoxei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsoxei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsoxei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsoxei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint16mf4_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsoxei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsoxei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsoxei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsoxei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsoxei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint32mf2_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsoxei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsoxei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsoxei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsoxei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsoxei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsoxei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsoxei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsoxei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsoxei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
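+// Note that the ei64 forms above stop at i32m4: a 64-bit offset vector is
+// twice the LMUL of its 32-bit data, so an i32m8 variant would need an m16
+// index register group, which does not exist. The widest legal combination,
+// again as a hypothetical wrapper for illustration:
+static void scatter_i32m4_ei64(vbool8_t mask, int32_t *base,
+ vuint64m8_t bindex, vint32m4_t value, size_t vl) {
+ // Eight 64-bit byte offsets per vscale (m8) address eight 32-bit elements
+ // per vscale (m4).
+ return vsoxei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+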
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsoxei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsoxei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsoxei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsoxei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsoxei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsoxei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsoxei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsoxei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsoxei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsoxei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsoxei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsoxei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsoxei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsoxei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsoxei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsoxei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
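+// The unsigned-element tests below exercise the same
+// llvm.riscv.vsoxei.mask.* intrinsic family: a store cares only about
+// element width, not signedness, so only the C-level types differ. A
+// hypothetical wrapper for the u8m1 case checked below:
+static void scatter_u8m1(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
+ vuint8m1_t value, size_t vl) {
+ // Data and byte offsets are both 8-bit here, so value and bindex occupy
+ // the same m1 register group.
+ return vsoxei8_v_u8m1_m(mask, base, bindex, value, vl);
+}
+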
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei8_v_u8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei8_v_u8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsoxei8_v_u8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsoxei8_v_u8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsoxei8_v_u8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
+ vuint8m4_t value, size_t vl) {
+ return vsoxei8_v_u8m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
+ vuint8m8_t value, size_t vl) {
+ return vsoxei8_v_u8m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei16_v_u8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei16_v_u8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsoxei16_v_u8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsoxei16_v_u8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsoxei16_v_u8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
+ vuint8m4_t value, size_t vl) {
+ return vsoxei16_v_u8m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei32_v_u8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei32_v_u8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsoxei32_v_u8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsoxei32_v_u8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsoxei32_v_u8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsoxei64_v_u8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsoxei64_v_u8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsoxei64_v_u8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsoxei64_v_u8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsoxei8_v_u16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsoxei8_v_u16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei8_v_u16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei8_v_u16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei8_v_u16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsoxei8_v_u16m8_m(mask, base, bindex, value, vl);
+}
+
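+// vsoxei16 with 16-bit data: the index EEW matches the data SEW, so bindex and
+// value share the same LMUL. As in all _m variants, only elements whose mask
+// bit is set are stored.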
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint16mf4_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsoxei16_v_u16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint16mf2_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsoxei16_v_u16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei16_v_u16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei16_v_u16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei16_v_u16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsoxei16_v_u16m8_m(mask, base, bindex, value, vl);
+}
+
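+// vsoxei32 with 16-bit data: 32-bit offsets need twice the data LMUL, so
+// coverage stops at u16m4 (an index vector for u16m8 would exceed LMUL=8).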
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint32mf2_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsoxei32_v_u16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint32m1_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsoxei32_v_u16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei32_v_u16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei32_v_u16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsoxei32_v_u16m4_m(mask, base, bindex, value, vl);
+}
+
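+// vsoxei64 with 16-bit data: 64-bit offsets need four times the data LMUL, so
+// u16m2 is the largest data type tested.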
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint64m1_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsoxei64_v_u16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint64m2_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsoxei64_v_u16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsoxei64_v_u16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsoxei64_v_u16m2_m(mask, base, bindex, value, vl);
+}
+
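+// Masked ordered-indexed stores of 32-bit elements; 8-bit byte offsets use one
+// quarter of the data LMUL.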
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsoxei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
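+// vsoxei16 with 32-bit data: the index vector uses half the data LMUL.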
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint16mf4_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsoxei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
+ vuint16mf2_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vsoxei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
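+// vsoxei32 with 32-bit data: index EEW matches data SEW, so bindex and value
+// share the same LMUL.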
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint32mf2_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsoxei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsoxei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
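+// vsoxei64 with 32-bit data: 64-bit offsets need twice the data LMUL, so
+// coverage stops at u32m4.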
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint64m1_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsoxei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsoxei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsoxei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsoxei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
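+// Masked ordered-indexed stores of 64-bit elements; SEW=64 has no fractional
+// LMUL types, so coverage starts at u64m1.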
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
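+// vsoxei16 with 64-bit data: the index vector uses one quarter of the data
+// LMUL.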
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint16mf4_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsoxei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
+ vuint16mf2_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vsoxei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
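+// vsoxei32 with 64-bit data: the index vector uses half the data LMUL.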
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint32mf2_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsoxei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
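+// vsoxei64 with 64-bit data: index EEW matches data SEW, so bindex and value
+// share the same LMUL.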
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsoxei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsoxei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsoxei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsoxei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
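+// Floating-point data follows the same pattern: the stored values are float
+// vectors, while the byte offsets remain unsigned integer vectors.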
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei8_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsoxei8_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsoxei8_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsoxei8_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsoxei8_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei16_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsoxei16_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsoxei16_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsoxei16_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsoxei16_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei32_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsoxei32_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsoxei32_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsoxei32_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsoxei32_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsoxei64_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsoxei64_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsoxei64_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsoxei64_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei8_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsoxei8_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsoxei8_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsoxei8_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei16_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsoxei16_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsoxei16_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsoxei16_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei32_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsoxei32_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsoxei32_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsoxei32_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsoxei64_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsoxei64_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsoxei64_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsoxei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsoxei64_v_f64m8_m(mask, base, bindex, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
new file mode 100644
index 000000000000..4ad0bcae1e2b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c
@@ -0,0 +1,1240 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsra_vv_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsra_vv_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsra_vv_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsra_vv_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsra_vv_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsra_vv_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsra_vv_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsra_vv_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsra_vv_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsra_vv_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsra_vv_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsra_vv_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsra_vv_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsra_vv_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsra_vv_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsra_vv_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsra_vv_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsra_vv_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsra_vv_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsra_vv_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsra_vv_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsra_vv_i64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m8(op1, shift, vl);
+}
+
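+// The _m variants below exercise the masked intrinsics. They carry two extra
+// arguments, the merge vector ([[MASKEDOFF]]) and the mask, and lower to
+// llvm.riscv.vsra.mask.*; lanes where the mask bit is clear take their value
+// from maskedoff.
+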
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsra_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsra_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsra_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsra_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsra_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsra_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsra_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsra_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsra_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsra_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsra_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsra_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsra_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsra_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsra_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsra_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+ return vsra_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
new file mode 100644
index 000000000000..9c5df425af96
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
@@ -0,0 +1,1241 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
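+
+// Naming scheme: _vv_ variants shift by a vector, _vx_ variants by a scalar;
+// the <sew>m<lmul> suffix (u8mf8 ... u64m8) encodes element width and LMUL
+// register grouping. Each wrapper is expected to lower to the matching
+// llvm.riscv.vsrl.* intrinsic checked above its definition.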
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsrl_vv_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf8(op1, shift, vl);
+}
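+
+// Unlike vsra (arithmetic right shift on signed types, replicating the sign
+// bit), vsrl is a logical right shift on unsigned types and fills the vacated
+// bits with zeros.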
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsrl_vv_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsrl_vv_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsrl_vv_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsrl_vv_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsrl_vv_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsrl_vv_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsrl_vv_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsrl_vv_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsrl_vv_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsrl_vv_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsrl_vv_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsrl_vv_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsrl_vv_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsrl_vv_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsrl_vv_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsrl_vv_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsrl_vv_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsrl_vv_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsrl_vv_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsrl_vv_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsrl_vv_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vsrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vsrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vsrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vsrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vsrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vsrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vsrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+ return vsrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+ return vsrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vsrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vsrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vsrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vsrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+ return vsrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vsrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vsrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vsrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vsrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vsrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vsrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vsrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vsrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+ return vsrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
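+
+// Note on the masked forms above: llvm.riscv.vsrl.mask.* takes `maskedoff`
+// as its first vector operand, so lanes whose mask bit is clear keep the
+// corresponding `maskedoff` element rather than a shifted result. Sketch
+// (illustrative only, not exercised here):
+//
+//   vuint32m1_t r = vsrl_vx_u32m1_m(m, old, x, 3, vl);
+//   // r[i] == m[i] ? (x[i] >> 3) : old[i]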
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
new file mode 100644
index 000000000000..851ba6fed9cc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
@@ -0,0 +1,1813 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
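+// The vsse<EEW> intrinsics exercised below are strided stores: they write
+// the first `vl` elements of `value` to memory starting at `base`, placing
+// consecutive elements `bstride` bytes apart (a byte stride, hence
+// ptrdiff_t).
+//
+// Usage sketch (illustrative only, with a hypothetical helper name; not
+// exercised by this test): store a vector into every other int32_t slot of
+// a buffer, i.e. a byte stride of 2 * sizeof(int32_t):
+//
+//   void store_every_other(int32_t *buf, vint32m1_t v, size_t vl) {
+//     vsse32_v_i32m1(buf, 2 * sizeof(int32_t), v, vl);
+//   }
+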
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value,
+ size_t vl) {
+ return vsse8_v_i8mf8(base, bstride, value, vl);
+}
+
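+// Each RV32/RV64 check pair below shows the same lowering as above: the
+// element pointer is bitcast to a scalable-vector pointer and the store
+// becomes a call to llvm.riscv.vsse.<vector type>.<xlen type>, with
+// `bstride` and `vl` passed as i32 on RV32 and i64 on RV64.
+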
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value,
+ size_t vl) {
+ return vsse8_v_i8mf4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value,
+ size_t vl) {
+ return vsse8_v_i8mf2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value,
+ size_t vl) {
+ return vsse8_v_i8m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value,
+ size_t vl) {
+ return vsse8_v_i8m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value,
+ size_t vl) {
+ return vsse8_v_i8m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value,
+ size_t vl) {
+ return vsse8_v_i8m8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value,
+ size_t vl) {
+ return vsse16_v_i16mf4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value,
+ size_t vl) {
+ return vsse16_v_i16mf2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value,
+ size_t vl) {
+ return vsse16_v_i16m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value,
+ size_t vl) {
+ return vsse16_v_i16m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value,
+ size_t vl) {
+ return vsse16_v_i16m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value,
+ size_t vl) {
+ return vsse16_v_i16m8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value,
+ size_t vl) {
+ return vsse32_v_i32mf2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value,
+ size_t vl) {
+ return vsse32_v_i32m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value,
+ size_t vl) {
+ return vsse32_v_i32m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value,
+ size_t vl) {
+ return vsse32_v_i32m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value,
+ size_t vl) {
+ return vsse32_v_i32m8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value,
+ size_t vl) {
+ return vsse64_v_i64m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, vint64m2_t value,
+ size_t vl) {
+ return vsse64_v_i64m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value,
+ size_t vl) {
+ return vsse64_v_i64m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value,
+ size_t vl) {
+ return vsse64_v_i64m8(base, bstride, value, vl);
+}
+
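+// The unsigned variants below lower to the same llvm.riscv.vsse.nxv*i*
+// intrinsics as the signed tests above: a store only moves bit patterns, so
+// signedness does not change the IR.
+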
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value,
+ size_t vl) {
+ return vsse8_v_u8mf8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value,
+ size_t vl) {
+ return vsse8_v_u8mf4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value,
+ size_t vl) {
+ return vsse8_v_u8mf2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value,
+ size_t vl) {
+ return vsse8_v_u8m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value,
+ size_t vl) {
+ return vsse8_v_u8m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value,
+ size_t vl) {
+ return vsse8_v_u8m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value,
+ size_t vl) {
+ return vsse8_v_u8m8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value,
+ size_t vl) {
+ return vsse16_v_u16mf4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value,
+ size_t vl) {
+ return vsse16_v_u16mf2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value,
+ size_t vl) {
+ return vsse16_v_u16m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value,
+ size_t vl) {
+ return vsse16_v_u16m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value,
+ size_t vl) {
+ return vsse16_v_u16m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value,
+ size_t vl) {
+ return vsse16_v_u16m8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value,
+ size_t vl) {
+ return vsse32_v_u32mf2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value,
+ size_t vl) {
+ return vsse32_v_u32m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value,
+ size_t vl) {
+ return vsse32_v_u32m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value,
+ size_t vl) {
+ return vsse32_v_u32m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value,
+ size_t vl) {
+ return vsse32_v_u32m8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value,
+ size_t vl) {
+ return vsse64_v_u64m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value,
+ size_t vl) {
+ return vsse64_v_u64m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value,
+ size_t vl) {
+ return vsse64_v_u64m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value,
+ size_t vl) {
+ return vsse64_v_u64m8(base, bstride, value, vl);
+}
+
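+// The floating-point variants follow the same pattern with nxv*f32 and
+// nxv*f64 element types; only the type of the value operand changes.
+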
+// CHECK-RV32-LABEL: @test_vsse32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1f32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value,
+ size_t vl) {
+ return vsse32_v_f32mf2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2f32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value,
+ size_t vl) {
+ return vsse32_v_f32m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4f32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value,
+ size_t vl) {
+ return vsse32_v_f32m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8f32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value,
+ size_t vl) {
+ return vsse32_v_f32m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv16f32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value,
+ size_t vl) {
+ return vsse32_v_f32m8(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv1f64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value,
+ size_t vl) {
+ return vsse64_v_f64m1(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv2f64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value,
+ size_t vl) {
+ return vsse64_v_f64m2(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv4f64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value,
+ size_t vl) {
+ return vsse64_v_f64m4(base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.nxv8f64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value,
+ size_t vl) {
+ return vsse64_v_f64m8(base, bstride, value, vl);
+}
+
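+// The _m forms below take a mask as the first argument; lanes whose mask
+// bit is clear are not written. The vbool ratio matches SEW/LMUL (e.g.
+// vbool64_t pairs with i8mf8), and the lowering switches to
+// llvm.riscv.vsse.mask.* with the mask as an extra operand.
+//
+// Masked-store sketch (illustrative; assumes the vmsne comparison intrinsic
+// from the same header): store only the lanes of `v` that are nonzero:
+//
+//   vbool32_t m = vmsne_vx_i32m1_b32(v, 0, vl);
+//   vsse32_v_i32m1_m(m, buf, sizeof(int32_t), v, vl);
+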
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8mf8_t value, size_t vl) {
+ return vsse8_v_i8mf8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8mf4_t value, size_t vl) {
+ return vsse8_v_i8mf4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8mf2_t value, size_t vl) {
+ return vsse8_v_i8mf2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8m1_t value, size_t vl) {
+ return vsse8_v_i8m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8m2_t value, size_t vl) {
+ return vsse8_v_i8m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8m4_t value, size_t vl) {
+ return vsse8_v_i8m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride,
+ vint8m8_t value, size_t vl) {
+ return vsse8_v_i8m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16mf4_t value, size_t vl) {
+ return vsse16_v_i16mf4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16mf2_t value, size_t vl) {
+ return vsse16_v_i16mf2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16m1_t value, size_t vl) {
+ return vsse16_v_i16m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16m2_t value, size_t vl) {
+ return vsse16_v_i16m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16m4_t value, size_t vl) {
+ return vsse16_v_i16m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride,
+ vint16m8_t value, size_t vl) {
+ return vsse16_v_i16m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32mf2_t value, size_t vl) {
+ return vsse32_v_i32mf2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32m1_t value, size_t vl) {
+ return vsse32_v_i32m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32m2_t value, size_t vl) {
+ return vsse32_v_i32m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32m4_t value, size_t vl) {
+ return vsse32_v_i32m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride,
+ vint32m8_t value, size_t vl) {
+ return vsse32_v_i32m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride,
+ vint64m1_t value, size_t vl) {
+ return vsse64_v_i64m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride,
+ vint64m2_t value, size_t vl) {
+ return vsse64_v_i64m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride,
+ vint64m4_t value, size_t vl) {
+ return vsse64_v_i64m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride,
+ vint64m8_t value, size_t vl) {
+ return vsse64_v_i64m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8mf8_t value, size_t vl) {
+ return vsse8_v_u8mf8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8mf4_t value, size_t vl) {
+ return vsse8_v_u8mf4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8mf2_t value, size_t vl) {
+ return vsse8_v_u8mf2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8m1_t value, size_t vl) {
+ return vsse8_v_u8m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8m2_t value, size_t vl) {
+ return vsse8_v_u8m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8m4_t value, size_t vl) {
+ return vsse8_v_u8m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride,
+ vuint8m8_t value, size_t vl) {
+ return vsse8_v_u8m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16mf4_t value, size_t vl) {
+ return vsse16_v_u16mf4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16mf2_t value, size_t vl) {
+ return vsse16_v_u16mf2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16m1_t value, size_t vl) {
+ return vsse16_v_u16m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16m2_t value, size_t vl) {
+ return vsse16_v_u16m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16m4_t value, size_t vl) {
+ return vsse16_v_u16m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride,
+ vuint16m8_t value, size_t vl) {
+ return vsse16_v_u16m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32mf2_t value, size_t vl) {
+ return vsse32_v_u32mf2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32m1_t value, size_t vl) {
+ return vsse32_v_u32m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32m2_t value, size_t vl) {
+ return vsse32_v_u32m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32m4_t value, size_t vl) {
+ return vsse32_v_u32m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride,
+ vuint32m8_t value, size_t vl) {
+ return vsse32_v_u32m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride,
+ vuint64m1_t value, size_t vl) {
+ return vsse64_v_u64m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride,
+ vuint64m2_t value, size_t vl) {
+ return vsse64_v_u64m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride,
+ vuint64m4_t value, size_t vl) {
+ return vsse64_v_u64m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride,
+ vuint64m8_t value, size_t vl) {
+ return vsse64_v_u64m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1f32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
+ vfloat32mf2_t value, size_t vl) {
+ return vsse32_v_f32mf2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2f32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride,
+ vfloat32m1_t value, size_t vl) {
+ return vsse32_v_f32m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4f32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride,
+ vfloat32m2_t value, size_t vl) {
+ return vsse32_v_f32m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8f32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride,
+ vfloat32m4_t value, size_t vl) {
+ return vsse32_v_f32m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv16f32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride,
+ vfloat32m8_t value, size_t vl) {
+ return vsse32_v_f32m8_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv1f64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride,
+ vfloat64m1_t value, size_t vl) {
+ return vsse64_v_f64m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv2f64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride,
+ vfloat64m2_t value, size_t vl) {
+ return vsse64_v_f64m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv4f64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride,
+ vfloat64m4_t value, size_t vl) {
+ return vsse64_v_f64m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsse64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i32 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsse64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride,
+ vfloat64m8_t value, size_t vl) {
+ return vsse64_v_f64m8_m(mask, base, bstride, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
new file mode 100644
index 000000000000..717070e4b6b8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
@@ -0,0 +1,1290 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
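+// vssra is the scaling shift right arithmetic operation from the RVV
+// fixed-point instruction group: each element of op1 is arithmetically
+// right-shifted by the shift amount, and the result is rounded according
+// to the vxrm CSR. The vv form shifts by a per-element vector of shift
+// amounts, the vx form by a single scalar.
+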
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssra_vv_i8mf8(vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vssra_vv_i8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssra_vx_i8mf8(vint8mf8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8mf8(op1, shift, vl);
+}
+
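+// Note that the vx form takes the scalar shift as size_t; in the checks
+// above it lowers to the XLEN integer type (i32 on RV32, i64 on RV64),
+// which is why the intrinsic name suffix differs between the two RUN
+// configurations.
+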
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssra_vv_i8mf4(vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vssra_vv_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssra_vx_i8mf4(vint8mf4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssra_vv_i8mf2(vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vssra_vv_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssra_vx_i8mf2(vint8mf2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssra_vv_i8m1(vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vssra_vv_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssra_vx_i8m1(vint8m1_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssra_vv_i8m2(vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vssra_vv_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssra_vx_i8m2(vint8m2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssra_vv_i8m4(vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vssra_vv_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssra_vx_i8m4(vint8m4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssra_vv_i8m8(vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vssra_vv_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssra_vx_i8m8(vint8m8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssra_vv_i16mf4(vint16mf4_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vssra_vv_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssra_vx_i16mf4(vint16mf4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssra_vv_i16mf2(vint16mf2_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vssra_vv_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssra_vx_i16mf2(vint16mf2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssra_vv_i16m1(vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vssra_vv_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssra_vx_i16m1(vint16m1_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssra_vv_i16m2(vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vssra_vv_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssra_vx_i16m2(vint16m2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssra_vv_i16m4(vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vssra_vv_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssra_vx_i16m4(vint16m4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssra_vv_i16m8(vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vssra_vv_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssra_vx_i16m8(vint16m8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssra_vv_i32mf2(vint32mf2_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vssra_vv_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssra_vx_i32mf2(vint32mf2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssra_vv_i32m1(vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vssra_vv_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssra_vx_i32m1(vint32m1_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssra_vv_i32m2(vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vssra_vv_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssra_vx_i32m2(vint32m2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssra_vv_i32m4(vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vssra_vv_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssra_vx_i32m4(vint32m4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssra_vv_i32m8(vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vssra_vv_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssra_vx_i32m8(vint32m8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssra_vv_i64m1(vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vssra_vv_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssra_vx_i64m1(vint64m1_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssra_vv_i64m2(vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vssra_vv_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssra_vx_i64m2(vint64m2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssra_vv_i64m4(vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vssra_vv_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssra_vx_i64m4(vint64m4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vssra_vv_i64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i64m8(op1, shift, vl);
+}
+
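+// The _m variants below exercise the masked overloads, lowered to
+// llvm.riscv.vssra.mask.*: elements whose mask bit is clear are taken from
+// the maskedoff operand rather than computed.
+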
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vssra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+ vint8mf8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vssra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+ vint8mf4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vssra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+ vint8mf2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vssra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+ vint8m1_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vssra_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+ vint8m2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vssra_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+ vint8m4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vssra_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+ vint8m8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vssra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vssra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vssra_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vssra_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vssra_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vssra_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vssra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vssra_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssra.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vssra_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssra.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vssra_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssra.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vssra_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssra.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vssra_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssra.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vssra_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssra.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vssra_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssra.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vssra_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssra_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, size_t shift, size_t vl) {
+ return vssra_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
new file mode 100644
index 000000000000..11b918bad79a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
@@ -0,0 +1,1305 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssrl_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+ return vssrl_vv_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i32.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.nxv1i8.i64.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssrl_vx_u8mf8(vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8mf8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssrl_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+ return vssrl_vv_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i32.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.nxv2i8.i64.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssrl_vx_u8mf4(vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssrl_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+ return vssrl_vv_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i32.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.nxv4i8.i64.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssrl_vx_u8mf2(vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssrl_vv_u8m1(vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vssrl_vv_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i32.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.nxv8i8.i64.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssrl_vx_u8m1(vuint8m1_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssrl_vv_u8m2(vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vssrl_vv_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i32.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.nxv16i8.i64.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssrl_vx_u8m2(vuint8m2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssrl_vv_u8m4(vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vssrl_vv_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i32.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.nxv32i8.i64.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssrl_vx_u8m4(vuint8m4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssrl_vv_u8m8(vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vssrl_vv_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i32.i32(<vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.nxv64i8.i64.i64(<vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssrl_vx_u8m8(vuint8m8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssrl_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vssrl_vv_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i32.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.nxv1i16.i64.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssrl_vx_u16mf4(vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16mf4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssrl_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vssrl_vv_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i32.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.nxv2i16.i64.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssrl_vx_u16mf2(vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssrl_vv_u16m1(vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+ return vssrl_vv_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i32.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.nxv4i16.i64.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssrl_vx_u16m1(vuint16m1_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssrl_vv_u16m2(vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+ return vssrl_vv_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i32.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.nxv8i16.i64.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssrl_vx_u16m2(vuint16m2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssrl_vv_u16m4(vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+ return vssrl_vv_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i32.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.nxv16i16.i64.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssrl_vx_u16m4(vuint16m4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssrl_vv_u16m8(vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+ return vssrl_vv_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i32.i32(<vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.nxv32i16.i64.i64(<vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssrl_vx_u16m8(vuint16m8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssrl_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vssrl_vv_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.nxv1i32.i64.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssrl_vx_u32mf2(vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32mf2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssrl_vv_u32m1(vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+ return vssrl_vv_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.nxv2i32.i64.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssrl_vx_u32m1(vuint32m1_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssrl_vv_u32m2(vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+ return vssrl_vv_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.nxv4i32.i64.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssrl_vx_u32m2(vuint32m2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssrl_vv_u32m4(vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+ return vssrl_vv_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.nxv8i32.i64.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssrl_vx_u32m4(vuint32m4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssrl_vv_u32m8(vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+ return vssrl_vv_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.nxv16i32.i64.i64(<vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssrl_vx_u32m8(vuint32m8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssrl_vv_u64m1(vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+ return vssrl_vv_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssrl_vx_u64m1(vuint64m1_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u64m1(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssrl_vv_u64m2(vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+ return vssrl_vv_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssrl_vx_u64m2(vuint64m2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u64m2(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssrl_vv_u64m4(vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+ return vssrl_vv_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssrl_vx_u64m4(vuint64m4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u64m4(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+ return vssrl_vv_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u64m8(op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t shift,
+ size_t vl) {
+ return vssrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i32.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t shift,
+ size_t vl) {
+ return vssrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i32.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssrl.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t shift,
+ size_t vl) {
+ return vssrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i32.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssrl.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+ return vssrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i32.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssrl.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+ return vssrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i32.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssrl.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+ return vssrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i32.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssrl.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+ return vssrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i32.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssrl.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t shift,
+ size_t vl) {
+ return vssrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i32.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssrl.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t shift,
+ size_t vl) {
+ return vssrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i32.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssrl.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t shift,
+ size_t vl) {
+ return vssrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i32.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssrl.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t shift,
+ size_t vl) {
+ return vssrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i32.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssrl.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t shift,
+ size_t vl) {
+ return vssrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i32.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssrl.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t shift,
+ size_t vl) {
+ return vssrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i32.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssrl.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t shift,
+ size_t vl) {
+ return vssrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssrl.mask.nxv1i32.i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t shift,
+ size_t vl) {
+ return vssrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssrl.mask.nxv2i32.i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t shift,
+ size_t vl) {
+ return vssrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssrl.mask.nxv4i32.i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t shift,
+ size_t vl) {
+ return vssrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssrl.mask.nxv8i32.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t shift,
+ size_t vl) {
+ return vssrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssrl.mask.nxv16i32.i64.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t shift,
+ size_t vl) {
+ return vssrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssrl.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t shift,
+ size_t vl) {
+ return vssrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssrl.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t shift,
+ size_t vl) {
+ return vssrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssrl.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t shift,
+ size_t vl) {
+ return vssrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssrl_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssrl.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, size_t shift, size_t vl) {
+ return vssrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
new file mode 100644
index 000000000000..de2ae7a9e91b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
@@ -0,0 +1,2587 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
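+
+// vssub and vssubu are the single-width saturating subtract intrinsics from
+// the fixed-point section of the V extension: vssub clamps signed results to
+// the signed minimum/maximum of the element type, while vssubu clamps
+// unsigned results at zero. The _vv forms subtract two vectors elementwise;
+// the _vx forms subtract a scalar from each element.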
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vssub_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vssub_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vssub_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vssub_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vssub_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vssub_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vssub_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vssub_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vssub_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vssub_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vssub_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vssub_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vssub_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vssub_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vssub_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vssub_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vssub_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vssub_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vssub_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vssub_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vssub_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vssub_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vssub_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vssub_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vssub_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vssub_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vssubu_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vssubu_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vssubu_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssubu_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vssubu_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssubu_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssubu_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vssubu_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssubu_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssubu_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vssubu_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssubu_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssubu_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vssubu_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssubu_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2,
+                                   size_t vl) {
+ return vssubu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2,
+                                   size_t vl) {
+ return vssubu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vssubu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vssubu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vssubu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vssubu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2,
+                                   size_t vl) {
+ return vssubu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssubu_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vssubu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssubu_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssubu_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vssubu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssubu_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssubu_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vssubu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssubu_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssubu_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vssubu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssubu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssubu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vssubu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssubu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vssubu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssubu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vssubu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssubu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vssubu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssubu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vssubu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssubu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vssubu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vssubu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vssubu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+                                 vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vssub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
+                                 vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+                                 vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vssub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
+                                 vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+                                 vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vssub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
+                                 vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+                               vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vssub_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
+                               vint8m1_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+                               vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vssub_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
+                               vint8m2_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+                               vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vssub_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
+                               vint8m4_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+                               vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vssub_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
+                               vint8m8_t op1, int8_t op2, size_t vl) {
+ return vssub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                   vint16mf4_t op1, vint16mf4_t op2,
+                                   size_t vl) {
+ return vssub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                   vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                   vint16mf2_t op1, vint16mf2_t op2,
+                                   size_t vl) {
+ return vssub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                   vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                 vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vssub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                 vint16m1_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                 vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vssub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                 vint16m2_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                 vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vssub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                 vint16m4_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                 vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vssub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                 vint16m8_t op1, int16_t op2, size_t vl) {
+ return vssub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vssub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vssub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vssub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vssub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vssub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int32_t op2, size_t vl) {
+ return vssub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vssub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int64_t op2, size_t vl) {
+ return vssub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vssub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int64_t op2, size_t vl) {
+ return vssub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vssub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int64_t op2, size_t vl) {
+ return vssub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vssub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int64_t op2, size_t vl) {
+ return vssub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
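+
+// Illustrative usage sketch (hypothetical helper, not generated by
+// update_cc_test_checks.py): the _m variants above compute results only in
+// lanes where `mask` is set; inactive lanes pass through `maskedoff`. Here
+// `bias` is subtracted, with signed saturation, only from negative bytes.
+void demo_vssub_vx_i8m1_m(int8_t *dst, const int8_t *src, int8_t bias,
+                          size_t n) {
+  for (size_t i = 0; i < n;) {
+    size_t vl = vsetvl_e8m1(n - i);           // lanes handled this pass
+    vint8m1_t v = vle8_v_i8m1(src + i, vl);   // load source bytes
+    vint8m1_t old = vle8_v_i8m1(dst + i, vl); // current destination bytes
+    vbool8_t m = vmslt_vx_i8m1_b8(v, 0, vl);  // mask: element < 0
+    vint8m1_t r = vssub_vx_i8m1_m(m, old, v, bias, vl);
+    vse8_v_i8m1(dst + i, r, vl);              // inactive lanes keep `old`
+    i += vl;
+  }
+}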
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vssubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vssubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vssubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vssubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vssubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vssubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vssubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
+ vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vssubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
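+
+// Illustrative usage sketch (hypothetical helper, not generated by
+// update_cc_test_checks.py): unlike the signed form, vssubu saturates toward
+// zero on underflow, e.g. darkening pixel bytes without wraparound.
+void demo_vssubu_vx_u8m1(uint8_t *pix, uint8_t amount, size_t n) {
+  for (size_t i = 0; i < n;) {
+    size_t vl = vsetvl_e8m1(n - i);
+    vuint8m1_t v = vle8_v_u8m1(pix + i, vl);  // load pixels
+    v = vssubu_vx_u8m1(v, amount, vl);        // clamps at 0 on underflow
+    vse8_v_u8m1(pix + i, v, vl);
+    i += vl;
+  }
+}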
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vssubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vssubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vssubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vssubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vssubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vssubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vssubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint16m8_t op2,
+ size_t vl) {
+ return vssubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vssubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vssubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2,
+ size_t vl) {
+ return vssubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vssubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vssubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vssubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint32m8_t op2,
+ size_t vl) {
+ return vssubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vssubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint64m1_t op2,
+ size_t vl) {
+ return vssubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vssubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint64m2_t op2,
+ size_t vl) {
+ return vssubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vssubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint64m4_t op2,
+ size_t vl) {
+ return vssubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vssubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint64m8_t op2,
+ size_t vl) {
+ return vssubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vssubu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vssubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
new file mode 100644
index 000000000000..4ac644ebbbfd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
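+// Each vv/vx pair below exercises one form/type combination: vsub_vv_* takes
+// two vector operands and vsub_vx_* a vector and a scalar. The type suffix
+// encodes element type and LMUL (i8mf8 = signed 8-bit elements at LMUL=1/8,
+// u64m8 = unsigned 64-bit elements at LMUL=8); the matched IR intrinsic is
+// mangled from the operand types plus the XLEN type of the vl operand (i32 on
+// riscv32, i64 on riscv64).
+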
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsub_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsub_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsub_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsub_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsub_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsub_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsub_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsub_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsub_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsub_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsub_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsub_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsub_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsub_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsub_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsub_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsub_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsub_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsub_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsub_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsub_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsub_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m8(op1, op2, vl);
+}
+
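+// The unsigned tests below lower to the same @llvm.riscv.vsub intrinsics as
+// the signed tests above: two's-complement subtraction is sign-agnostic, so
+// only the C-level element types differ.
+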
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsub_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsub_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsub_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsub_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsub_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsub_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsub_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vsub_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vsub_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsub_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsub_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsub_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsub_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vsub_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsub_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsub_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsub_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsub_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsub_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsub_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsub_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vsub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vsub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vsub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vsub_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vsub_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vsub_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vsub_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vsub_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vsub_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vsub_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vsub_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vsub_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vsub_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vsub_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vsub_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vsub_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vsub_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vsub_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vsub_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vsub_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vsub_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vsub_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vsub_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vsub_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vsub_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vsub_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vsub_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vsub_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vsub_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
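+
+// Summary of the masked pattern above (inferred from the CHECK lines, not
+// normative): each vsub_v{v,x}_*_m overload lowers to
+// @llvm.riscv.vsub.mask.<data>.<op2>.<xlen> with operand order
+// (maskedoff, op1, op2, mask, vl); inactive lanes take their value from
+// maskedoff, and vl is i32 on riscv32 versus i64 on riscv64.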
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
new file mode 100644
index 000000000000..593cccb25d2a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
@@ -0,0 +1,6523 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
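+// (The third RUN line above compiles with -Wall -Werror straight to assembly;
+// ASM-NOT then asserts that no "warning" diagnostic reaches FileCheck, with
+// --allow-empty permitting the otherwise empty input.)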
+#include <riscv_vector.h>
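+
+// vsuxei{8,16,32,64} are RVV's unordered indexed (scatter) stores: each
+// active element of `value` is written to `base` plus the matching byte
+// offset taken from `bindex`. The tests below sweep every element type and
+// LMUL for which the index operand still fits in a register group, which is
+// why, for example, the 64-bit-index forms for i8 data stop at i8m1.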
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsuxei8_v_i8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsuxei8_v_i8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsuxei8_v_i8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsuxei8_v_i8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsuxei8_v_i8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
+ size_t vl) {
+ return vsuxei8_v_i8m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
+ size_t vl) {
+ return vsuxei8_v_i8m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsuxei16_v_i8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsuxei16_v_i8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsuxei16_v_i8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsuxei16_v_i8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsuxei16_v_i8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
+ size_t vl) {
+ return vsuxei16_v_i8m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsuxei32_v_i8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsuxei32_v_i8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsuxei32_v_i8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsuxei32_v_i8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value,
+ size_t vl) {
+ return vsuxei32_v_i8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value,
+ size_t vl) {
+ return vsuxei64_v_i8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value,
+ size_t vl) {
+ return vsuxei64_v_i8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value,
+ size_t vl) {
+ return vsuxei64_v_i8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value,
+ size_t vl) {
+ return vsuxei64_v_i8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsuxei8_v_i16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
+ size_t vl) {
+ return vsuxei8_v_i16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsuxei8_v_i16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsuxei8_v_i16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsuxei8_v_i16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value,
+ size_t vl) {
+ return vsuxei8_v_i16m8(base, bindex, value, vl);
+}
+
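+// vsuxei16 forms: the index vector now has EEW=16. With 16-bit data the EEW
+// ratio is 1, so the index and data operands share the same LMUL below.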
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei16_v_i16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei16_v_i16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsuxei16_v_i16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsuxei16_v_i16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsuxei16_v_i16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
+ size_t vl) {
+ return vsuxei16_v_i16m8(base, bindex, value, vl);
+}
+
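+// vsuxei32 forms: 32-bit indices with 16-bit data double the index EMUL
+// (vint16mf4_t pairs with vuint32mf2_t); data LMUL stops at m4 so the index
+// vector stays within m8.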
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei32_v_i16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei32_v_i16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsuxei32_v_i16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsuxei32_v_i16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
+ size_t vl) {
+ return vsuxei32_v_i16m4(base, bindex, value, vl);
+}
+
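+// vsuxei64 forms: 64-bit indices quadruple the index EMUL relative to 16-bit
+// data, capping the data LMUL at m2 (vint16m2_t pairs with vuint64m8_t).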
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei64_v_i16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei64_v_i16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
+ size_t vl) {
+ return vsuxei64_v_i16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
+ size_t vl) {
+ return vsuxei64_v_i16m2(base, bindex, value, vl);
+}
+
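+// 32-bit data section. With vsuxei8 the index EMUL is a quarter of the data
+// LMUL: vint32mf2_t pairs with vuint8mf8_t, up to vint32m8_t with vuint8m2_t.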
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsuxei8_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsuxei8_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsuxei8_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsuxei8_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsuxei8_v_i32m8(base, bindex, value, vl);
+}
+
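+// vsuxei16 with 32-bit data halves the index EMUL (vint32mf2_t pairs with
+// vuint16mf4_t); all data LMULs up to m8 remain representable.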
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei16_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsuxei16_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsuxei16_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsuxei16_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsuxei16_v_i32m8(base, bindex, value, vl);
+}
+
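+// Matching EEW: 32-bit indices with 32-bit data use the same LMUL for the
+// index and data operands.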
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei32_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsuxei32_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsuxei32_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsuxei32_v_i32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
+ size_t vl) {
+ return vsuxei32_v_i32m8(base, bindex, value, vl);
+}
+
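+// vsuxei64 with 32-bit data doubles the index EMUL, capping the data LMUL at
+// m4 (vint32m4_t pairs with vuint64m8_t).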
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei64_v_i32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
+ size_t vl) {
+ return vsuxei64_v_i32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
+ size_t vl) {
+ return vsuxei64_v_i32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
+ size_t vl) {
+ return vsuxei64_v_i32m4(base, bindex, value, vl);
+}
+
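+// 64-bit data section. With vsuxei8 the index EMUL is an eighth of the data
+// LMUL: vint64m1_t pairs with vuint8mf8_t, up to vint64m8_t with vuint8m1_t.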
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsuxei8_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsuxei8_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsuxei8_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsuxei8_v_i64m8(base, bindex, value, vl);
+}
+
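+// vsuxei16 with 64-bit data: the index EMUL is a quarter of the data LMUL
+// (vint64m1_t pairs with vuint16mf4_t).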
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsuxei16_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsuxei16_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsuxei16_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsuxei16_v_i64m8(base, bindex, value, vl);
+}
+
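+// vsuxei32 with 64-bit data: the index EMUL is half the data LMUL
+// (vint64m1_t pairs with vuint32mf2_t).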
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsuxei32_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsuxei32_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsuxei32_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsuxei32_v_i64m8(base, bindex, value, vl);
+}
+
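+// Matching EEW again: 64-bit indices with 64-bit data share the data LMUL.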
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
+ size_t vl) {
+ return vsuxei64_v_i64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
+ size_t vl) {
+ return vsuxei64_v_i64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
+ size_t vl) {
+ return vsuxei64_v_i64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
+ size_t vl) {
+ return vsuxei64_v_i64m8(base, bindex, value, vl);
+}
+
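+// Unsigned-element variants. LLVM IR vector types carry no signedness, so the
+// u8 tests below lower to the same llvm.riscv.vsuxei intrinsics as the signed
+// forms; only the C-level types differ.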
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
+ size_t vl) {
+ return vsuxei8_v_u8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
+ size_t vl) {
+ return vsuxei8_v_u8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsuxei8_v_u8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsuxei8_v_u8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsuxei8_v_u8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
+ size_t vl) {
+ return vsuxei8_v_u8m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
+ size_t vl) {
+ return vsuxei8_v_u8m8(base, bindex, value, vl);
+}
+
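+// vsuxei16 with 8-bit data doubles the index EMUL, capping the data LMUL at
+// m4 (vuint8m4_t pairs with vuint16m8_t).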
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei16_v_u8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsuxei16_v_u8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
+ size_t vl) {
+ return vsuxei16_v_u8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
+ size_t vl) {
+ return vsuxei16_v_u8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
+ size_t vl) {
+ return vsuxei16_v_u8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
+                          size_t vl) {
+  return vsuxei16_v_u8m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
+                           vuint8mf8_t value, size_t vl) {
+  return vsuxei32_v_u8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
+                           size_t vl) {
+  return vsuxei32_v_u8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
+                           size_t vl) {
+  return vsuxei32_v_u8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
+                          size_t vl) {
+  return vsuxei32_v_u8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
+                          size_t vl) {
+  return vsuxei32_v_u8m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
+                           size_t vl) {
+  return vsuxei64_v_u8mf8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
+                           size_t vl) {
+  return vsuxei64_v_u8mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
+                           size_t vl) {
+  return vsuxei64_v_u8mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
+                          size_t vl) {
+  return vsuxei64_v_u8m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
+                           vuint16mf4_t value, size_t vl) {
+  return vsuxei8_v_u16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
+                           vuint16mf2_t value, size_t vl) {
+  return vsuxei8_v_u16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
+                          size_t vl) {
+  return vsuxei8_v_u16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
+                          size_t vl) {
+  return vsuxei8_v_u16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
+                          size_t vl) {
+  return vsuxei8_v_u16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
+                          size_t vl) {
+  return vsuxei8_v_u16m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
+                            vuint16mf4_t value, size_t vl) {
+  return vsuxei16_v_u16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
+                            vuint16mf2_t value, size_t vl) {
+  return vsuxei16_v_u16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
+                           vuint16m1_t value, size_t vl) {
+  return vsuxei16_v_u16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
+                           vuint16m2_t value, size_t vl) {
+  return vsuxei16_v_u16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
+                           vuint16m4_t value, size_t vl) {
+  return vsuxei16_v_u16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
+                           vuint16m8_t value, size_t vl) {
+  return vsuxei16_v_u16m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
+                            vuint16mf4_t value, size_t vl) {
+  return vsuxei32_v_u16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex,
+                            vuint16mf2_t value, size_t vl) {
+  return vsuxei32_v_u16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex,
+                           vuint16m1_t value, size_t vl) {
+  return vsuxei32_v_u16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex,
+                           vuint16m2_t value, size_t vl) {
+  return vsuxei32_v_u16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex,
+                           vuint16m4_t value, size_t vl) {
+  return vsuxei32_v_u16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex,
+                            vuint16mf4_t value, size_t vl) {
+  return vsuxei64_v_u16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex,
+                            vuint16mf2_t value, size_t vl) {
+  return vsuxei64_v_u16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex,
+                           vuint16m1_t value, size_t vl) {
+  return vsuxei64_v_u16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex,
+                           vuint16m2_t value, size_t vl) {
+  return vsuxei64_v_u16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
+                           vuint32mf2_t value, size_t vl) {
+  return vsuxei8_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
+                          size_t vl) {
+  return vsuxei8_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
+                          size_t vl) {
+  return vsuxei8_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
+                          size_t vl) {
+  return vsuxei8_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
+                          size_t vl) {
+  return vsuxei8_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
+                            vuint32mf2_t value, size_t vl) {
+  return vsuxei16_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
+                           vuint32m1_t value, size_t vl) {
+  return vsuxei16_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
+                           vuint32m2_t value, size_t vl) {
+  return vsuxei16_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
+                           vuint32m4_t value, size_t vl) {
+  return vsuxei16_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
+                           vuint32m8_t value, size_t vl) {
+  return vsuxei16_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
+                            vuint32mf2_t value, size_t vl) {
+  return vsuxei32_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
+                           vuint32m1_t value, size_t vl) {
+  return vsuxei32_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
+                           vuint32m2_t value, size_t vl) {
+  return vsuxei32_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
+                           vuint32m4_t value, size_t vl) {
+  return vsuxei32_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
+                           vuint32m8_t value, size_t vl) {
+  return vsuxei32_v_u32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
+                            vuint32mf2_t value, size_t vl) {
+  return vsuxei64_v_u32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
+                           vuint32m1_t value, size_t vl) {
+  return vsuxei64_v_u32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
+                           vuint32m2_t value, size_t vl) {
+  return vsuxei64_v_u32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
+                           vuint32m4_t value, size_t vl) {
+  return vsuxei64_v_u32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
+                          size_t vl) {
+  return vsuxei8_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
+                          size_t vl) {
+  return vsuxei8_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
+                          size_t vl) {
+  return vsuxei8_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
+                          size_t vl) {
+  return vsuxei8_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
+                           vuint64m1_t value, size_t vl) {
+  return vsuxei16_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
+                           vuint64m2_t value, size_t vl) {
+  return vsuxei16_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
+                           vuint64m4_t value, size_t vl) {
+  return vsuxei16_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
+                           vuint64m8_t value, size_t vl) {
+  return vsuxei16_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
+                           vuint64m1_t value, size_t vl) {
+  return vsuxei32_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
+                           vuint64m2_t value, size_t vl) {
+  return vsuxei32_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
+                           vuint64m4_t value, size_t vl) {
+  return vsuxei32_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
+                           vuint64m8_t value, size_t vl) {
+  return vsuxei32_v_u64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
+                           vuint64m1_t value, size_t vl) {
+  return vsuxei64_v_u64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei64_v_u64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei64_v_u64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei64_v_u64m8(base, bindex, value, vl);
+}
+
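+// The floating-point overloads below follow the same pattern as the integer
+// ones: for vsuxeiN_v_<type>, the index operand's LMUL is the data LMUL
+// scaled by N/SEW(data). For example, vsuxei8_v_f32m1 (SEW=32, LMUL=m1)
+// takes a vuint8mf4_t index, since (8/32) * m1 = mf4.
+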
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
+ size_t vl) {
+ return vsuxei8_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsuxei8_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsuxei8_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsuxei8_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsuxei8_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei16_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsuxei16_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsuxei16_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsuxei16_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsuxei16_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei32_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsuxei32_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsuxei32_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsuxei32_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
+ size_t vl) {
+ return vsuxei32_v_f32m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei64_v_f32mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
+ size_t vl) {
+ return vsuxei64_v_f32m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
+ size_t vl) {
+ return vsuxei64_v_f32m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
+ size_t vl) {
+ return vsuxei64_v_f32m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vsuxei8_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsuxei8_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsuxei8_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsuxei8_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei16_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei16_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsuxei16_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsuxei16_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei32_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsuxei32_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsuxei32_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsuxei32_v_f64m8(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
+ size_t vl) {
+ return vsuxei64_v_f64m1(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
+ size_t vl) {
+ return vsuxei64_v_f64m2(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
+ size_t vl) {
+ return vsuxei64_v_f64m4(base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
+ size_t vl) {
+ return vsuxei64_v_f64m8(base, bindex, value, vl);
+}
+
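+// The _m overloads below are the masked forms: they take a vboolN_t mask as
+// the first argument (N = SEW/LMUL of the data type, so i8mf8 pairs with
+// vbool64_t) and lower to the @llvm.riscv.vsuxei.mask.* intrinsics, which
+// carry the mask as an extra <vscale x K x i1> operand.
+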
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsuxei8_v_i8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsuxei8_v_i8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsuxei8_v_i8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsuxei8_v_i8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsuxei8_v_i8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex,
+ vint8m4_t value, size_t vl) {
+ return vsuxei8_v_i8m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex,
+ vint8m8_t value, size_t vl) {
+ return vsuxei8_v_i8m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsuxei16_v_i8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsuxei16_v_i8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsuxei16_v_i8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsuxei16_v_i8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsuxei16_v_i8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
+ vint8m4_t value, size_t vl) {
+ return vsuxei16_v_i8m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsuxei32_v_i8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsuxei32_v_i8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsuxei32_v_i8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsuxei32_v_i8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
+ vint8m2_t value, size_t vl) {
+ return vsuxei32_v_i8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
+ vint8mf8_t value, size_t vl) {
+ return vsuxei64_v_i8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
+ vint8mf4_t value, size_t vl) {
+ return vsuxei64_v_i8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
+ vint8mf2_t value, size_t vl) {
+ return vsuxei64_v_i8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
+ vint8m1_t value, size_t vl) {
+ return vsuxei64_v_i8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei8_v_i16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei8_v_i16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsuxei8_v_i16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsuxei8_v_i16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsuxei8_v_i16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
+ vint16m8_t value, size_t vl) {
+ return vsuxei8_v_i16m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
+ vuint16mf4_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsuxei16_v_i16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
+ vuint16mf2_t bindex, vint16mf2_t value,
+ size_t vl) {
+ return vsuxei16_v_i16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsuxei16_v_i16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsuxei16_v_i16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsuxei16_v_i16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
+ vint16m8_t value, size_t vl) {
+ return vsuxei16_v_i16m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
+ vuint32mf2_t bindex, vint16mf4_t value,
+ size_t vl) {
+ return vsuxei32_v_i16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei32_v_i16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsuxei32_v_i16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsuxei32_v_i16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
+ vint16m4_t value, size_t vl) {
+ return vsuxei32_v_i16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
+ vint16mf4_t value, size_t vl) {
+ return vsuxei64_v_i16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
+ vint16mf2_t value, size_t vl) {
+ return vsuxei64_v_i16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex,
+ vint16m1_t value, size_t vl) {
+ return vsuxei64_v_i16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex,
+ vint16m2_t value, size_t vl) {
+ return vsuxei64_v_i16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei8_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsuxei8_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsuxei8_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsuxei8_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsuxei8_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint16mf4_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsuxei16_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsuxei16_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsuxei16_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsuxei16_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsuxei16_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
+ vuint32mf2_t bindex, vint32mf2_t value,
+ size_t vl) {
+ return vsuxei32_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsuxei32_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsuxei32_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsuxei32_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
+ vint32m8_t value, size_t vl) {
+ return vsuxei32_v_i32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
+ vint32mf2_t value, size_t vl) {
+ return vsuxei64_v_i32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
+ vint32m1_t value, size_t vl) {
+ return vsuxei64_v_i32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
+ vint32m2_t value, size_t vl) {
+ return vsuxei64_v_i32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
+ vint32m4_t value, size_t vl) {
+ return vsuxei64_v_i32m4_m(mask, base, bindex, value, vl);
+}
+
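+// Note (editor's aside): the RV32 and RV64 check prefixes differ only in the
+// XLEN-dependent VL operand; the i64-element tests that follow still take an
+// i32 vl on RV32, hence the paired .i32/.i64 intrinsic suffixes in the checks.
+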
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsuxei8_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsuxei8_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsuxei8_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsuxei8_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsuxei16_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsuxei16_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsuxei16_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsuxei16_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsuxei32_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsuxei32_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsuxei32_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsuxei32_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
+ vint64m1_t value, size_t vl) {
+ return vsuxei64_v_i64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
+ vint64m2_t value, size_t vl) {
+ return vsuxei64_v_i64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
+ vint64m4_t value, size_t vl) {
+ return vsuxei64_v_i64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
+ vint64m8_t value, size_t vl) {
+ return vsuxei64_v_i64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei8_v_u8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsuxei8_v_u8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsuxei8_v_u8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsuxei8_v_u8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsuxei8_v_u8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
+ vuint8m4_t value, size_t vl) {
+ return vsuxei8_v_u8m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
+ vuint8m8_t value, size_t vl) {
+ return vsuxei8_v_u8m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei16_v_u8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsuxei16_v_u8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsuxei16_v_u8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsuxei16_v_u8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsuxei16_v_u8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
+ vuint8m4_t value, size_t vl) {
+ return vsuxei16_v_u8m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei32_v_u8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsuxei32_v_u8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsuxei32_v_u8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsuxei32_v_u8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex,
+ vuint8m2_t value, size_t vl) {
+ return vsuxei32_v_u8m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
+ vuint8mf8_t value, size_t vl) {
+ return vsuxei64_v_u8mf8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
+ vuint8mf4_t value, size_t vl) {
+ return vsuxei64_v_u8mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
+ vuint8mf2_t value, size_t vl) {
+ return vsuxei64_v_u8mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
+ vuint8m1_t value, size_t vl) {
+ return vsuxei64_v_u8m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
+ vuint16mf4_t value, size_t vl) {
+ return vsuxei8_v_u16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
+ vuint16mf2_t value, size_t vl) {
+ return vsuxei8_v_u16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsuxei8_v_u16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsuxei8_v_u16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsuxei8_v_u16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsuxei8_v_u16m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint16mf4_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsuxei16_v_u16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint16mf2_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsuxei16_v_u16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsuxei16_v_u16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsuxei16_v_u16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsuxei16_v_u16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
+ vuint16m8_t value, size_t vl) {
+ return vsuxei16_v_u16m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint32mf2_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsuxei32_v_u16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint32m1_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsuxei32_v_u16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsuxei32_v_u16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsuxei32_v_u16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
+ vuint16m4_t value, size_t vl) {
+ return vsuxei32_v_u16m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
+ vuint64m1_t bindex, vuint16mf4_t value,
+ size_t vl) {
+ return vsuxei64_v_u16mf4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
+ vuint64m2_t bindex, vuint16mf2_t value,
+ size_t vl) {
+ return vsuxei64_v_u16mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
+ vuint16m1_t value, size_t vl) {
+ return vsuxei64_v_u16m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
+ vuint16m2_t value, size_t vl) {
+ return vsuxei64_v_u16m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
+ vuint32mf2_t value, size_t vl) {
+ return vsuxei8_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsuxei8_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsuxei8_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsuxei8_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsuxei8_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint16mf4_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsuxei16_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
+ vuint16mf2_t bindex, vuint32m1_t value,
+ size_t vl) {
+ return vsuxei16_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsuxei16_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsuxei16_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsuxei16_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint32mf2_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsuxei32_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsuxei32_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsuxei32_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsuxei32_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
+ vuint32m8_t value, size_t vl) {
+ return vsuxei32_v_u32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
+ vuint64m1_t bindex, vuint32mf2_t value,
+ size_t vl) {
+ return vsuxei64_v_u32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
+ vuint32m1_t value, size_t vl) {
+ return vsuxei64_v_u32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
+ vuint32m2_t value, size_t vl) {
+ return vsuxei64_v_u32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
+ vuint32m4_t value, size_t vl) {
+ return vsuxei64_v_u32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsuxei8_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei8_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei8_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei8_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint16mf4_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsuxei16_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
+ vuint16mf2_t bindex, vuint64m2_t value,
+ size_t vl) {
+ return vsuxei16_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei16_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei16_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
+ vuint32mf2_t bindex, vuint64m1_t value,
+ size_t vl) {
+ return vsuxei32_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei32_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei32_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei32_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
+ vuint64m1_t value, size_t vl) {
+ return vsuxei64_v_u64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex,
+ vuint64m2_t value, size_t vl) {
+ return vsuxei64_v_u64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex,
+ vuint64m4_t value, size_t vl) {
+ return vsuxei64_v_u64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
+ vuint64m8_t value, size_t vl) {
+ return vsuxei64_v_u64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei8_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsuxei8_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsuxei8_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsuxei8_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsuxei8_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei16_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsuxei16_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsuxei16_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsuxei16_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsuxei16_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei32_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsuxei32_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsuxei32_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsuxei32_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
+ vfloat32m8_t value, size_t vl) {
+ return vsuxei32_v_f32m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
+ vfloat32mf2_t value, size_t vl) {
+ return vsuxei64_v_f32mf2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
+ vfloat32m1_t value, size_t vl) {
+ return vsuxei64_v_f32m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
+ vfloat32m2_t value, size_t vl) {
+ return vsuxei64_v_f32m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
+ vfloat32m4_t value, size_t vl) {
+ return vsuxei64_v_f32m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei8_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei8_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsuxei8_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei8_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsuxei8_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei16_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei16_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsuxei16_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei16_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsuxei16_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei32_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei32_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsuxei32_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei32_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsuxei32_v_f64m8_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
+ vfloat64m1_t value, size_t vl) {
+ return vsuxei64_v_f64m1_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
+ vfloat64m2_t value, size_t vl) {
+ return vsuxei64_v_f64m2_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
+ vfloat64m4_t value, size_t vl) {
+ return vsuxei64_v_f64m4_m(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsuxei64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex,
+ vfloat64m8_t value, size_t vl) {
+ return vsuxei64_v_f64m8_m(mask, base, bindex, value, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
new file mode 100644
index 000000000000..34071a7af902
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c
@@ -0,0 +1,3524 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
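+// These tests cover the four signed widening-add forms: vwadd_vv
+// (SEW vector + SEW vector -> 2*SEW vector), vwadd_vx (SEW vector +
+// SEW scalar), vwadd_wv (2*SEW vector + SEW vector), and vwadd_wx
+// (2*SEW vector + SEW scalar), across the legal SEW/LMUL combinations.
+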
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_wv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i32(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwadd_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+ return vwadd_wv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i32(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwadd_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+ return vwadd_wv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i32(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwadd_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
+ return vwadd_wv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i32(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwadd_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
+ return vwadd_wv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i32(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwadd_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
+ return vwadd_wv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i32(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16m8(op1, op2, vl);
+}
+
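+// 16-bit -> 32-bit widening group.
+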
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vwadd_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
+ return vwadd_wv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i32(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwadd_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+ return vwadd_wv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i32(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwadd_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
+ return vwadd_wv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i32(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwadd_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
+ return vwadd_wv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i32(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwadd_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
+ return vwadd_wv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i32(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32m8(op1, op2, vl);
+}
+
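+// 32-bit -> 64-bit widening group.
+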
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwadd_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+ return vwadd_wv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwadd_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
+ return vwadd_wv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwadd_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
+ return vwadd_wv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwadd_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
+ return vwadd_wv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx_i64m8(op1, op2, vl);
+}
+
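+// vwaddu is the unsigned counterpart: the same four forms, but operating
+// on vuint/uint types and zero-extending the narrow operands instead of
+// sign-extending them.
+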
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwaddu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwaddu_wv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i32(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwaddu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwaddu_wv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i32(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwaddu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwaddu_wv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i32(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwaddu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+ return vwaddu_wv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i32(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwaddu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+ return vwaddu_wv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i32(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwaddu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+ return vwaddu_wv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i32(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwaddu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwaddu_wv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i32(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwaddu_wv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i32(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwaddu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
+ return vwaddu_wv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i32(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwaddu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
+ return vwaddu_wv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i32(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwaddu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
+ return vwaddu_wv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i32(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwaddu_wv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwaddu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
+ return vwaddu_wv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwaddu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
+ return vwaddu_wv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwaddu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
+ return vwaddu_wv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+ return vwadd_wv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+ return vwadd_wv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+ return vwadd_wv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint8m1_t op2, size_t vl) {
+ return vwadd_wv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint8m2_t op2, size_t vl) {
+ return vwadd_wv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint8m4_t op2, size_t vl) {
+ return vwadd_wv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int8_t op2, size_t vl) {
+ return vwadd_wx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwadd_wv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+ return vwadd_wv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint16m1_t op2, size_t vl) {
+ return vwadd_wv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint16m2_t op2, size_t vl) {
+ return vwadd_wv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint16m4_t op2, size_t vl) {
+ return vwadd_wv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int16_t op2, size_t vl) {
+ return vwadd_wx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+ return vwadd_wv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint32m1_t op2, size_t vl) {
+ return vwadd_wv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint32m2_t op2, size_t vl) {
+ return vwadd_wv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint32m4_t op2, size_t vl) {
+ return vwadd_wv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwadd_wx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int32_t op2, size_t vl) {
+ return vwadd_wx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwaddu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwaddu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwaddu_wv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+ return vwaddu_wv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+ return vwaddu_wv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+ return vwaddu_wv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint8_t op2, size_t vl) {
+ return vwaddu_wx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vwaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwaddu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vwaddu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwaddu_wv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwaddu_wv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwaddu_wv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwaddu_wv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint16_t op2, size_t vl) {
+ return vwaddu_wx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwaddu_wv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwaddu_wv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwaddu_wv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwaddu_wv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwaddu_wx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint32_t op2, size_t vl) {
+ return vwaddu_wx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
new file mode 100644
index 000000000000..caffdf75e923
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
@@ -0,0 +1,3186 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
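+
+// vwmacc is the signed widening multiply-accumulate: each product of two
+// SEW-wide operands is computed at 2*SEW and added into the 2*SEW-wide
+// accumulator, i.e. acc[i] += op1[i] * op2[i]. The _vx forms splat the
+// scalar op1 across the vector before multiplying.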
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
+ vint8mf8_t op2, size_t vl) {
+ return vwmacc_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
+ size_t vl) {
+ return vwmacc_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
+ vint8mf4_t op2, size_t vl) {
+ return vwmacc_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
+ size_t vl) {
+ return vwmacc_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
+ size_t vl) {
+ return vwmacc_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
+ size_t vl) {
+ return vwmacc_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
+ size_t vl) {
+ return vwmacc_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
+ size_t vl) {
+ return vwmacc_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
+ size_t vl) {
+ return vwmacc_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
+ size_t vl) {
+ return vwmacc_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
+ size_t vl) {
+ return vwmacc_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
+ size_t vl) {
+ return vwmacc_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
+ vint16mf4_t op2, size_t vl) {
+ return vwmacc_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwmacc_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
+ vint16mf2_t op2, size_t vl) {
+ return vwmacc_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vwmacc_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
+ size_t vl) {
+ return vwmacc_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
+ size_t vl) {
+ return vwmacc_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
+ size_t vl) {
+ return vwmacc_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
+ size_t vl) {
+ return vwmacc_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
+ size_t vl) {
+ return vwmacc_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
+ size_t vl) {
+ return vwmacc_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
+ vint32mf2_t op2, size_t vl) {
+ return vwmacc_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vwmacc_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
+ size_t vl) {
+ return vwmacc_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
+ size_t vl) {
+ return vwmacc_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
+ size_t vl) {
+ return vwmacc_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
+ size_t vl) {
+ return vwmacc_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
+ size_t vl) {
+ return vwmacc_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
+ size_t vl) {
+ return vwmacc_vx_i64m8(acc, op1, op2, vl);
+}
+
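+// The vwmaccu_* tests below cover the unsigned widening multiply-accumulate:
+// per the RVV spec, vd[i] += ZExt(op1[i]) * ZExt(op2[i]), accumulating into
+// the 2*SEW-wide destination vd (with a splatted scalar op1 in the _vx forms).
+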
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vwmaccu_vv_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vwmaccu_vx_u16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vwmaccu_vv_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vwmaccu_vx_u16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
+ vuint8mf2_t op2, size_t vl) {
+ return vwmaccu_vv_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwmaccu_vx_u16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
+ vuint8m1_t op2, size_t vl) {
+ return vwmaccu_vv_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
+ size_t vl) {
+ return vwmaccu_vx_u16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
+ vuint8m2_t op2, size_t vl) {
+ return vwmaccu_vv_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
+ size_t vl) {
+ return vwmaccu_vx_u16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
+ vuint8m4_t op2, size_t vl) {
+ return vwmaccu_vv_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
+ size_t vl) {
+ return vwmaccu_vx_u16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vwmaccu_vv_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vwmaccu_vx_u32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vwmaccu_vv_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vwmaccu_vx_u32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vwmaccu_vv_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vwmaccu_vx_u32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vwmaccu_vv_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vwmaccu_vx_u32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vwmaccu_vv_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vwmaccu_vx_u32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vwmaccu_vv_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vwmaccu_vx_u64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vwmaccu_vv_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vwmaccu_vx_u64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vwmaccu_vv_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vwmaccu_vx_u64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vwmaccu_vv_u64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vwmaccu_vx_u64m8(acc, op1, op2, vl);
+}
+
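+// The vwmaccsu_* tests below cover the signed-by-unsigned variant:
+// vd[i] += SExt(op1[i]) * ZExt(op2[i]), with a splatted signed scalar op1 in
+// the _vx forms.
+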
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vwmaccsu_vv_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
+ vuint8mf8_t op2, size_t vl) {
+ return vwmaccsu_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vwmaccsu_vv_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
+ vuint8mf4_t op2, size_t vl) {
+ return vwmaccsu_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
+ vuint8mf2_t op2, size_t vl) {
+ return vwmaccsu_vv_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vwmaccsu_vv_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
+ vuint16mf4_t op2, size_t vl) {
+ return vwmaccsu_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vwmaccsu_vv_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vwmaccsu_vv_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vwmaccsu_vv_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vwmaccsu_vv_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vwmaccsu_vv_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vwmaccsu_vv_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vwmaccsu_vv_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vwmaccsu_vv_i64m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i64m8(acc, op1, op2, vl);
+}
+
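+// The vwmaccus_* tests below cover the unsigned-by-signed variant,
+// vd[i] += ZExt(op1) * SExt(op2[i]); the RVV spec defines vwmaccus only in
+// vector-scalar (.vx) form, so there are no _vv tests for it.
+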
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
+ vint8mf8_t op2, size_t vl) {
+ return vwmaccus_vx_i16mf4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
+ vint8mf4_t op2, size_t vl) {
+ return vwmaccus_vx_i16mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i16m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i16m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i16m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i16m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
+ vint16mf4_t op2, size_t vl) {
+ return vwmaccus_vx_i32mf2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i32m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i32m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i32m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i32m8(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i64m1(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i64m2(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i64m4(acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i64m8(acc, op1, op2, vl);
+}
+
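+// The _m tests below exercise the masked overloads: a vboolN_t mask becomes
+// the first argument and the calls lower to the .mask. intrinsic variants,
+// where masked-off elements keep the old accumulator value.
+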
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwmacc_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
+ vint8mf8_t op2, size_t vl) {
+ return vwmacc_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwmacc_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
+ vint8mf4_t op2, size_t vl) {
+ return vwmacc_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwmacc_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
+ vint8mf2_t op2, size_t vl) {
+ return vwmacc_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
+ vint8m1_t op2, size_t vl) {
+ return vwmacc_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
+ vint8m1_t op2, size_t vl) {
+ return vwmacc_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
+ vint8m2_t op2, size_t vl) {
+ return vwmacc_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
+ vint8m2_t op2, size_t vl) {
+ return vwmacc_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
+ vint8m4_t op2, size_t vl) {
+ return vwmacc_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
+ vint8m4_t op2, size_t vl) {
+ return vwmacc_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwmacc_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ int16_t op1, vint16mf4_t op2, size_t vl) {
+ return vwmacc_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
+ vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwmacc_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
+ vint16mf2_t op2, size_t vl) {
+ return vwmacc_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwmacc_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
+ vint16m1_t op2, size_t vl) {
+ return vwmacc_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
+ vint16m2_t op2, size_t vl) {
+ return vwmacc_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
+ vint16m2_t op2, size_t vl) {
+ return vwmacc_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
+ vint16m4_t op2, size_t vl) {
+ return vwmacc_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
+ vint16m4_t op2, size_t vl) {
+ return vwmacc_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
+ vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwmacc_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
+ vint32mf2_t op2, size_t vl) {
+ return vwmacc_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwmacc_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
+ vint32m1_t op2, size_t vl) {
+ return vwmacc_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwmacc_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
+ vint32m2_t op2, size_t vl) {
+ return vwmacc_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
+ vint32m4_t op2, size_t vl) {
+ return vwmacc_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmacc_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
+ vint32m4_t op2, size_t vl) {
+ return vwmacc_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
+
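+// Masked vwmaccu tests follow: the unsigned widening multiply-add, so the
+// accumulator and both operands are unsigned and the result is 2*SEW wide.
+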
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
+ uint8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmaccu_vx_u16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
+ uint8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmaccu_vx_u16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
+ uint8_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmaccu_vx_u16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmaccu_vv_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
+ vuint8m1_t op2, size_t vl) {
+ return vwmaccu_vx_u16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmaccu_vv_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
+ vuint8m2_t op2, size_t vl) {
+ return vwmaccu_vx_u16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmaccu_vv_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
+ vuint8m4_t op2, size_t vl) {
+ return vwmaccu_vx_u16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
+ uint16_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwmaccu_vx_u32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
+ uint16_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmaccu_vx_u32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
+ uint16_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmaccu_vx_u32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
+ uint16_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmaccu_vx_u32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
+ uint16_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmaccu_vx_u32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
+ uint32_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmaccu_vx_u64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
+ uint32_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmaccu_vx_u64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
+ uint32_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmaccu_vx_u64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwmaccu_vv_u64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
+ uint32_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmaccu_vx_u64m8_m(mask, acc, op1, op2, vl);
+}
+
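+// The tests below switch from vwmaccu (unsigned * unsigned) to vwmaccsu,
+// which mixes signedness: per the V extension, op1 is sign-extended and op2
+// is zero-extended to 2*SEW before the multiply-add. A rough scalar model of
+// the masked i16mf4 case (a sketch, not part of the generated checks):
+//   for (size_t i = 0; i < vl; ++i)
+//     if (mask[i]) acc[i] += (int16_t)op1[i] * (uint16_t)(uint8_t)op2[i];
+// Inactive elements keep the value already in acc.
+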
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
+ vint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
+ int8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmaccsu_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
+ vint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
+ int8_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmaccsu_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
+ vint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
+ vuint8mf2_t op2, size_t vl) {
+ return vwmaccsu_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
+ vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmaccsu_vv_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
+ vuint8m1_t op2, size_t vl) {
+ return vwmaccsu_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
+ vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmaccsu_vv_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
+ vuint8m2_t op2, size_t vl) {
+ return vwmaccsu_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
+ vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmaccsu_vv_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
+ vuint8m4_t op2, size_t vl) {
+ return vwmaccsu_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ vint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ int16_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwmaccsu_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
+ vint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
+ vuint16mf2_t op2, size_t vl) {
+ return vwmaccsu_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
+ vint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
+ vuint16m1_t op2, size_t vl) {
+ return vwmaccsu_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
+ vint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
+ vuint16m2_t op2, size_t vl) {
+ return vwmaccsu_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
+ vint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
+ vuint16m4_t op2, size_t vl) {
+ return vwmaccsu_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
+ vint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
+ vuint32mf2_t op2, size_t vl) {
+ return vwmaccsu_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
+ vint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
+ vuint32m1_t op2, size_t vl) {
+ return vwmaccsu_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
+ vint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
+ vuint32m2_t op2, size_t vl) {
+ return vwmaccsu_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
+ vint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwmaccsu_vv_i64m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccsu_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
+ vuint32m4_t op2, size_t vl) {
+ return vwmaccsu_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
+
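+// vwmaccus reverses the signedness of vwmaccsu: the scalar multiplicand op1
+// is unsigned (zero-extended) and the vector operand op2 is signed
+// (sign-extended). Only .vx tests follow because the V extension defines
+// vwmaccus solely in vector-scalar form; there is no vwmaccus.vv encoding.
+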
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i32(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
+ uint8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwmaccus_vx_i16mf4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i32(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
+ uint8_t op1, vint8mf4_t op2, size_t vl) {
+ return vwmaccus_vx_i16mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i32(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
+ vint8mf2_t op2, size_t vl) {
+ return vwmaccus_vx_i16m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i32(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
+ vint8m1_t op2, size_t vl) {
+ return vwmaccus_vx_i16m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i32(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
+ vint8m2_t op2, size_t vl) {
+ return vwmaccus_vx_i16m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i32(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
+ vint8m4_t op2, size_t vl) {
+ return vwmaccus_vx_i16m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i32(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
+ uint16_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwmaccus_vx_i32mf2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i32(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
+ uint16_t op1, vint16mf2_t op2, size_t vl) {
+ return vwmaccus_vx_i32m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i32(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
+ uint16_t op1, vint16m1_t op2, size_t vl) {
+ return vwmaccus_vx_i32m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i32(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
+ vint16m2_t op2, size_t vl) {
+ return vwmaccus_vx_i32m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i32(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
+ vint16m4_t op2, size_t vl) {
+ return vwmaccus_vx_i32m8_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i32(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
+ uint32_t op1, vint32mf2_t op2, size_t vl) {
+ return vwmaccus_vx_i64m1_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i32(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
+ uint32_t op1, vint32m1_t op2, size_t vl) {
+ return vwmaccus_vx_i64m2_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i32(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
+ uint32_t op1, vint32m2_t op2, size_t vl) {
+ return vwmaccus_vx_i64m4_m(mask, acc, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmaccus_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i32(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1,
+ vint32m4_t op2, size_t vl) {
+ return vwmaccus_vx_i64m8_m(mask, acc, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
new file mode 100644
index 000000000000..7d4a02f22583
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
@@ -0,0 +1,2528 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
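+// vwmul is a widening signed multiply: each SEW-bit element pair yields a
+// 2*SEW-bit product, so the result uses twice the source LMUL (e.g.
+// vint8mf8_t operands produce a vint16mf4_t result). Roughly, the .vv form
+// behaves like this scalar sketch for each element below vl:
+//   result[i] = (int16_t)op1[i] * (int16_t)op2[i];
+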
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmul_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwmul_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmul_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmul_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwmul_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmul_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmul_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwmul_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmul_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmul_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwmul_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmul_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmul_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwmul_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmul_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmul_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwmul_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmul_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmul_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vwmul_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmul_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmul_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwmul_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmul_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmul_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwmul_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmul_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmul_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwmul_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmul_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmul_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwmul_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmul_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmul_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwmul_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmul_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwmul_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmul_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwmul_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmul_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwmul_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmul_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwmul_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmul_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwmul_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmul_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwmul_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmul_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwmul_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmulu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmulu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmulu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmulu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmulu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmulu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmulu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmulu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmulu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmulu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmulu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmulu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmulu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmulu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmulu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmulu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmulu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmulu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmulu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vwmulu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmulu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmulu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmulu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmulu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmulu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmulu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmulu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmulu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmulu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmulu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmulu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmulu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmulu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmulu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmulu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmulu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwmulu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmulu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmulu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmulu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwmulu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmulu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmulu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmulu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwmulu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmulu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmulu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmulu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwmulu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmulsu_vv_i16mf4(vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmulsu_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmulsu_vx_i16mf4(vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmulsu_vv_i16mf2(vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmulsu_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmulsu_vx_i16mf2(vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmulsu_vv_i16m1(vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmulsu_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmulsu_vx_i16m1(vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmulsu_vv_i16m2(vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmulsu_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmulsu_vx_i16m2(vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmulsu_vv_i16m4(vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmulsu_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmulsu_vx_i16m4(vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmulsu_vv_i16m8(vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmulsu_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmulsu_vx_i16m8(vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmulsu_vv_i32mf2(vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vwmulsu_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmulsu_vx_i32mf2(vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmulsu_vv_i32m1(vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmulsu_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmulsu_vx_i32m1(vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmulsu_vv_i32m2(vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmulsu_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmulsu_vx_i32m2(vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmulsu_vv_i32m4(vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmulsu_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmulsu_vx_i32m4(vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmulsu_vv_i32m8(vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmulsu_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmulsu_vx_i32m8(vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vv_i64m1(vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmulsu_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vx_i64m1(vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vv_i64m2(vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmulsu_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vx_i64m2(vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vv_i64m4(vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmulsu_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vx_i64m4(vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmulsu_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vwmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmul_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmulu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmulu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmulu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmulu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmulu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmulu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwmulu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vwmulu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmulu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmulu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmulu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmulu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwmulu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmulu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwmulu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmulu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwmulu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmulu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwmulu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmulu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwmulu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vwmulsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vwmulsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwmulsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwmulsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwmulsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwmulsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwmulsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vwmulsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwmulsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwmulsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwmulsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwmulsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwmulsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwmulsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwmulsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwmulsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwmulsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwmulsu_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwmulsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
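
A note on the pattern the masked vwmul/vwmulu/vwmulsu tests above all share: each active element of the result is the product of two SEW-bit sources widened to 2*SEW bits, with op1 sign-extended and op2 zero-extended for vwmulsu (both operands zero-extended for vwmulu); elements whose mask bit is clear are taken unchanged from maskedoff. A minimal scalar sketch of the 32-to-64-bit vx form, assuming plain C arrays stand in for vector register groups (the model_* name and the bool mask array are illustrative, not part of the intrinsics API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

// Scalar model of masked vwmulsu.vx at SEW=32: sign-extend op1[i],
// zero-extend the scalar op2, multiply to 64 bits. The multiply is done
// in uint64_t so overflow wraps modulo 2^64, matching the hardware result.
static void model_vwmulsu_vx_i64(int64_t *dst, const int64_t *maskedoff,
                                 const int32_t *op1, uint32_t op2,
                                 const bool *mask, size_t vl) {
  for (size_t i = 0; i < vl; ++i)
    dst[i] = mask[i]
                 ? (int64_t)((uint64_t)(int64_t)op1[i] * (uint64_t)op2)
                 : maskedoff[i]; // inactive elements stay undisturbed
}
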
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
new file mode 100644
index 000000000000..3ccba3e5949e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
@@ -0,0 +1,1124 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum_vs_i8mf8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum_vs_i8mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum_vs_i8mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum_vs_i8m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum_vs_i8m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum_vs_i8m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector,
+ vint16m1_t scalar, size_t vl) {
+ return vwredsum_vs_i8m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum_vs_i16mf4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum_vs_i16mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum_vs_i16m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum_vs_i16m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum_vs_i16m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum_vs_i16m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32mf2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32m8_i64m1(dst, vector, scalar, vl);
+}
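
Each vwredsum_vs test above reduces a whole vector into a single element: every SEW-bit element of `vector` is sign-extended to 2*SEW bits and summed together with element 0 of `scalar`, and the 2*SEW-bit total lands in element 0 of the destination; the vwredsumu_vs tests that follow are identical except the vector elements are zero-extended. A rough scalar equivalent for the 32-to-64-bit case, assuming a plain array stands in for the vector register group (the model_* name is illustrative only):

#include <stddef.h>
#include <stdint.h>

// Scalar model of vwredsum.vs at SEW=32: seed the accumulator with
// element 0 of the scalar operand, then add each sign-extended source
// element. Arithmetic is done in uint64_t so it wraps modulo 2^64.
static int64_t model_vwredsum_vs_i32_i64(int64_t scalar0,
                                         const int32_t *vector, size_t vl) {
  uint64_t acc = (uint64_t)scalar0;
  for (size_t i = 0; i < vl; ++i)
    acc += (uint64_t)(int64_t)vector[i]; // sign-extend, wrap-around add
  return (int64_t)acc; // only element 0 of the destination is defined
}
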
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8mf8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16mf4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32mf2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m8_u64m1(dst, vector, scalar, vl);
+}
+
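+// The _m tests below exercise the masked forms of the same reductions. Each
+// takes a vboolN_t mask (where N matches the SEW/LMUL ratio of the narrow
+// vector operand) as its first argument, and is expected to lower to the
+// corresponding @llvm.riscv.vwredsum.mask.* / @llvm.riscv.vwredsumu.mask.*
+// intrinsic with the mask passed immediately before the vl operand.
+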
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst,
+ vint8mf8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i8mf8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst,
+ vint8mf4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i8mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst,
+ vint8mf2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i8mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst,
+ vint8m1_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i8m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst,
+ vint8m2_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i8m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst,
+ vint8m4_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i8m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst,
+ vint8m8_t vector, vint16m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i8m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst,
+ vint16mf4_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum_vs_i16mf4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst,
+ vint16mf2_t vector,
+ vint32m1_t scalar, size_t vl) {
+ return vwredsum_vs_i16mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst,
+ vint16m1_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i16m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst,
+ vint16m2_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i16m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst,
+ vint16m4_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i16m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst,
+ vint16m8_t vector, vint32m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i16m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst,
+ vint32mf2_t vector,
+ vint64m1_t scalar, size_t vl) {
+ return vwredsum_vs_i32mf2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst,
+ vint32m1_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i32m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst,
+ vint32m2_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i32m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst,
+ vint32m4_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i32m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst,
+ vint32m8_t vector, vint64m1_t scalar,
+ size_t vl) {
+ return vwredsum_vs_i32m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+ vuint8mf8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8mf8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+ vuint8mf4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+ vuint8mf2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+ vuint8m1_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+ vuint8m2_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+ vuint8m4_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst,
+ vuint8m8_t vector,
+ vuint16m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u8m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+ vuint16mf4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16mf4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+ vuint16mf2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+ vuint16m1_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+ vuint16m2_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+ vuint16m4_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst,
+ vuint16m8_t vector,
+ vuint32m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u16m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+ vuint32mf2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32mf2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+ vuint32m1_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+ vuint32m2_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+ vuint32m4_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst,
+ vuint32m8_t vector,
+ vuint64m1_t scalar, size_t vl) {
+ return vwredsumu_vs_u32m8_u64m1_m(mask, dst, vector, scalar, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
new file mode 100644
index 000000000000..dd29f7262775
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
@@ -0,0 +1,3524 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
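+// The two IR RUN lines above verify the generated LLVM IR on riscv32 and
+// riscv64; the assembly RUN line, together with ASM-NOT, checks that the
+// intrinsics compile to assembly without emitting any warnings.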
+#include <riscv_vector.h>
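+
+// vwsub comes in four forms: vwsub_vv/vwsub_vx widen and subtract two
+// SEW-wide operands (vector-vector / vector-scalar) into a 2*SEW-wide
+// result, while vwsub_wv/vwsub_wx subtract a SEW-wide operand from a first
+// operand that is already 2*SEW wide. The tests check that each C intrinsic
+// lowers to the matching @llvm.riscv.vwsub.* or @llvm.riscv.vwsub.w.* call,
+// with the vl type (i32 vs. i64) following the target triple.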
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwsub_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+ return vwsub_wv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i32(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwsub_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+ return vwsub_wv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i32(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwsub_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+ return vwsub_wv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i32(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwsub_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
+ return vwsub_wv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i32(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwsub_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
+ return vwsub_wv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i32(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwsub_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
+ return vwsub_wv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i32(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vwsub_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
+ return vwsub_wv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i32(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwsub_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+ return vwsub_wv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i32(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwsub_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
+ return vwsub_wv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i32(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwsub_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
+ return vwsub_wv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i32(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwsub_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
+ return vwsub_wv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i32(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwsub_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+ return vwsub_wv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwsub_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
+ return vwsub_wv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwsub_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
+ return vwsub_wv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwsub_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
+ return vwsub_wv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx_i64m8(op1, op2, vl);
+}
+
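+// The vwsubu_* tests below mirror the signed vwsub_* cases above, using the
+// unsigned widening subtract: the 2*SEW-wide result is computed either from
+// two SEW-wide operands (vv/vx) or from an already-widened first operand
+// (wv/wx).
+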
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwsubu_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwsubu_wv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i32(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwsubu_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwsubu_wv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i32(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwsubu_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
+ return vwsubu_wv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i32(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwsubu_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+ return vwsubu_wv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i32(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwsubu_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+ return vwsubu_wv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i32(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwsubu_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+ return vwsubu_wv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i32(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwsubu_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwsubu_wv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i32(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
+ return vwsubu_wv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i32(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vwsubu_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
+ return vwsubu_wv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i32(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vwsubu_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
+ return vwsubu_wv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i32(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vwsubu_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
+ return vwsubu_wv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i32(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32m8(op1, op2, vl);
+}
+
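+// From here the destination element width doubles from 32 to 64 bits, so
+// the narrow operands become 32-bit (vuint32mf2_t through vuint32m4_t) and
+// the scalar _vx/_wx forms take uint32_t instead of uint16_t.
+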
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
+ return vwsubu_wv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i32(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vwsubu_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
+ return vwsubu_wv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i32(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vwsubu_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
+ return vwsubu_wv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i32(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vwsubu_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
+ return vwsubu_wv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i32(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx_u64m8(op1, op2, vl);
+}
+
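+// The _m tests below cover the masked variants. Each takes the mask and a
+// maskedoff vector ahead of the regular operands and lowers to the
+// corresponding @llvm.riscv.vwsub.mask.* (or vwsub.w.mask.*) intrinsic,
+// with maskedoff passed as the first IR operand to supply the values of
+// inactive elements.
+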
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vwsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+ return vwsub_wv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+ vint16mf4_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vwsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
+ return vwsub_wv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+ vint16mf2_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vwsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, vint8mf2_t op2, size_t vl) {
+ return vwsub_wv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+ vint16m1_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vwsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint8m1_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, vint8m1_t op2, size_t vl) {
+ return vwsub_wv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+ vint16m2_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vwsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint8m2_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, vint8m2_t op2, size_t vl) {
+ return vwsub_wv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+ vint16m4_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vwsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint8m4_t op1, int8_t op2, size_t vl) {
+ return vwsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, vint8m4_t op2, size_t vl) {
+ return vwsub_wv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+ vint16m8_t op1, int8_t op2, size_t vl) {
+ return vwsub_wx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, vint16mf4_t op2,
+ size_t vl) {
+ return vwsub_wv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+ vint32mf2_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vwsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+ return vwsub_wv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+ vint32m1_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vwsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint16m1_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, vint16m1_t op2, size_t vl) {
+ return vwsub_wv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+ vint32m2_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vwsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint16m2_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, vint16m2_t op2, size_t vl) {
+ return vwsub_wv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+ vint32m4_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vwsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint16m4_t op1, int16_t op2, size_t vl) {
+ return vwsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, vint16m4_t op2, size_t vl) {
+ return vwsub_wv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+ vint32m8_t op1, int16_t op2, size_t vl) {
+ return vwsub_wx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vwsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+ return vwsub_wv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+ vint64m1_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vwsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint32m1_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, vint32m1_t op2, size_t vl) {
+ return vwsub_wv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+ vint64m2_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vwsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint32m2_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, vint32m2_t op2, size_t vl) {
+ return vwsub_wv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+ vint64m4_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vwsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint32m4_t op1, int32_t op2, size_t vl) {
+ return vwsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, vint32m4_t op2, size_t vl) {
+ return vwsub_wv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsub_wx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+ vint64m8_t op1, int32_t op2, size_t vl) {
+ return vwsub_wx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
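+// Note (illustrative sketch, not part of the generated checks): from here
+// the tests switch from vwsub (signed, sign-extending) to vwsubu (unsigned,
+// zero-extending) over the same vv/vx/wv/wx shapes and LMUL ladder. A
+// per-element model of the masked unsigned case:
+//   vd[i] = mask[i] ? (uint16_t)op1[i] - (uint16_t)op2[i] : maskedoff[i];
+// describes vwsubu_vv over uint8_t sources widening to uint16_t.
+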
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwsubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, vuint8mf8_t op2,
+ size_t vl) {
+ return vwsubu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint16mf4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwsubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, vuint8mf4_t op2,
+ size_t vl) {
+ return vwsubu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint16mf2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, vuint8mf2_t op2,
+ size_t vl) {
+ return vwsubu_wv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint16m1_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vwsubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+ return vwsubu_wv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint16m2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vwsubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+ return vwsubu_wv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint16m4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vwsubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+ return vwsubu_wv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint16m8_t op1, uint8_t op2, size_t vl) {
+ return vwsubu_wx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwsubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, uint16_t op2,
+ size_t vl) {
+ return vwsubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, vuint16mf4_t op2,
+ size_t vl) {
+ return vwsubu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint32mf2_t op1, uint16_t op2,
+ size_t vl) {
+ return vwsubu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, vuint16mf2_t op2,
+ size_t vl) {
+ return vwsubu_wv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint32m1_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwsubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, vuint16m1_t op2,
+ size_t vl) {
+ return vwsubu_wv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint32m2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwsubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, vuint16m2_t op2,
+ size_t vl) {
+ return vwsubu_wv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint32m4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwsubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, vuint16m4_t op2,
+ size_t vl) {
+ return vwsubu_wv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint32m8_t op1, uint16_t op2, size_t vl) {
+ return vwsubu_wx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwsubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, vuint32mf2_t op2,
+ size_t vl) {
+ return vwsubu_wv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint64m1_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwsubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, vuint32m1_t op2,
+ size_t vl) {
+ return vwsubu_wv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint64m2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwsubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, vuint32m2_t op2,
+ size_t vl) {
+ return vwsubu_wv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint64m4_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwsubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, vuint32m4_t op2,
+ size_t vl) {
+ return vwsubu_wv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwsubu_wx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint64m8_t op1, uint32_t op2, size_t vl) {
+ return vwsubu_wx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
new file mode 100644
index 000000000000..2351c55bd7f5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c
@@ -0,0 +1,2472 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vxor_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vxor_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vxor_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vxor_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vxor_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vxor_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vxor_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vxor_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vxor_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vxor_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vxor_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vxor_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vxor_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vxor_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vxor_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vxor_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vxor_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vxor_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vxor_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vxor_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vxor_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vxor_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vxor_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vxor_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vxor_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vxor_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vxor_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vxor_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vxor_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i32(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.nxv64i8.i8.i64(<vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vxor_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vxor_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vxor_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vxor_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vxor_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vxor_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i32(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.nxv32i16.i16.i64(<vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vxor_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vxor_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vxor_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vxor_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vxor_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i32(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.nxv16i32.i32.i64(<vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m8(op1, op2, vl);
+}
+
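+// 64-bit element tests. Note that the vxor_vx scalar operand stays i64 even
+// on RV32 (see the i64 [[OP2]] operands below); only the VL operand narrows
+// to i32.
+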
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vxor_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i32(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vxor_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i32(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vxor_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i32(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vxor_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i32(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m8(op1, op2, vl);
+}
+
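+// Masked (_m) variants. Each overload additionally takes a mask and a
+// maskedoff passthru and is expected to lower to the corresponding
+// @llvm.riscv.vxor.mask.* intrinsic, with maskedoff prepended to the operand
+// list and the mask inserted before the VL operand.
+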
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+ return vxor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+ return vxor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+ return vxor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return vxor_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+ return vxor_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+ return vxor_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+ return vxor_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+ return vxor_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+ return vxor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+ return vxor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+ return vxor_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+ return vxor_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+ return vxor_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+ return vxor_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+ return vxor_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+ return vxor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+ return vxor_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+ return vxor_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+ return vxor_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+ return vxor_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+ return vxor_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+ return vxor_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+ return vxor_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+ return vxor_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+ return vxor_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return vxor_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+ return vxor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i32(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vxor.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+ return vxor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i32(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vxor.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+ return vxor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i32(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vxor.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+ return vxor_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i32(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vxor.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+ return vxor_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i32(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vxor.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return vxor_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i32(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vxor.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+ return vxor_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i32(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vxor.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+ return vxor_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+ return vxor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vxor.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+ return vxor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vxor.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+ return vxor_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vxor.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+ return vxor_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vxor.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+ return vxor_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vxor.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+ return vxor_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vxor.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+ return vxor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+ return vxor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+ return vxor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vxor.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+ return vxor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vxor.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+ return vxor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vxor.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+ return vxor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vxor.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+ return vxor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+ return vxor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vxor.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+ return vxor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vxor.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+ return vxor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vxor.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+ return vxor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vxor.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+ return vxor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
new file mode 100644
index 000000000000..5fd4609715f9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
@@ -0,0 +1,820 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
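+// Note: the vzext_vf2/vf4/vf8 intrinsics exercised below zero-extend each
+// unsigned element to 2x, 4x, or 8x its source width; the element count is
+// preserved, so the result LMUL scales with SEW (e.g. a vuint8mf8_t operand
+// widens to vuint16mf4_t under vf2).
+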
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
+ return vzext_vf2_u16mf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
+ return vzext_vf2_u16mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
+ return vzext_vf2_u16m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
+ return vzext_vf2_u16m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
+ return vzext_vf2_u16m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
+ return vzext_vf2_u16m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
+ return vzext_vf4_u32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
+ return vzext_vf4_u32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
+ return vzext_vf4_u32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
+ return vzext_vf4_u32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
+ return vzext_vf4_u32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
+ return vzext_vf8_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
+ return vzext_vf8_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
+ return vzext_vf8_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
+ return vzext_vf8_u64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
+ return vzext_vf2_u32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
+ return vzext_vf2_u32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
+ return vzext_vf2_u32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
+ return vzext_vf2_u32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
+ return vzext_vf2_u32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
+ return vzext_vf4_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
+ return vzext_vf4_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
+ return vzext_vf4_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
+ return vzext_vf4_u64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m1(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
+ return vzext_vf2_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m2(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
+ return vzext_vf2_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m4(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
+ return vzext_vf2_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
+ return vzext_vf2_u64m8(op1, vl);
+}
+
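+// Editorial note (added commentary, not part of the checked output): the _m
+// variants below take a mask and a maskedoff vector ahead of the source
+// operand; lanes whose mask bit is clear are taken from maskedoff, which is
+// reflected by the extra <vscale x N x i1> and passthru arguments in the
+// .mask. IR intrinsics.
+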
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+ vuint8mf8_t op1, size_t vl) {
+ return vzext_vf2_u16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+ vuint8mf4_t op1, size_t vl) {
+ return vzext_vf2_u16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+ vuint8mf2_t op1, size_t vl) {
+ return vzext_vf2_u16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+ vuint8m1_t op1, size_t vl) {
+ return vzext_vf2_u16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+ vuint8m2_t op1, size_t vl) {
+ return vzext_vf2_u16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+ vuint8m4_t op1, size_t vl) {
+ return vzext_vf2_u16m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint8mf8_t op1, size_t vl) {
+ return vzext_vf4_u32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint8mf4_t op1, size_t vl) {
+ return vzext_vf4_u32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint8mf2_t op1, size_t vl) {
+ return vzext_vf4_u32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint8m1_t op1, size_t vl) {
+ return vzext_vf4_u32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint8m2_t op1, size_t vl) {
+ return vzext_vf4_u32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint8mf8_t op1, size_t vl) {
+ return vzext_vf8_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint8mf4_t op1, size_t vl) {
+ return vzext_vf8_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint8mf2_t op1, size_t vl) {
+ return vzext_vf8_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint8m1_t op1, size_t vl) {
+ return vzext_vf8_u64m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+ vuint16mf4_t op1, size_t vl) {
+ return vzext_vf2_u32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+ vuint16mf2_t op1, size_t vl) {
+ return vzext_vf2_u32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+ vuint16m1_t op1, size_t vl) {
+ return vzext_vf2_u32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+ vuint16m2_t op1, size_t vl) {
+ return vzext_vf2_u32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+ vuint16m4_t op1, size_t vl) {
+ return vzext_vf2_u32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint16mf4_t op1, size_t vl) {
+ return vzext_vf4_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint16mf2_t op1, size_t vl) {
+ return vzext_vf4_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint16m1_t op1, size_t vl) {
+ return vzext_vf4_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint16m2_t op1, size_t vl) {
+ return vzext_vf4_u64m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+ vuint32mf2_t op1, size_t vl) {
+ return vzext_vf2_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+ vuint32m1_t op1, size_t vl) {
+ return vzext_vf2_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+ vuint32m2_t op1, size_t vl) {
+ return vzext_vf2_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+ vuint32m4_t op1, size_t vl) {
+ return vzext_vf2_u64m8_m(mask, maskedoff, op1, vl);
+}
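+
+// Editorial sketch (hedged, not part of this test): a minimal example of how
+// the masked form exercised above might be used from user code, assuming a
+// toolchain where <riscv_vector.h> and the V-extension intrinsics are
+// available. The helper name widen_even_bytes is hypothetical.
+//
+//   #include <riscv_vector.h>
+//
+//   // Zero-extend bytes to 32 bits, but only in lanes where the mask is
+//   // set; all other lanes keep the values already held in acc.
+//   vuint32m2_t widen_even_bytes(vbool16_t mask, vuint32m2_t acc,
+//                                vuint8mf2_t bytes, size_t vl) {
+//     return vzext_vf4_u32m2_m(mask, acc, bytes, vl);
+//   }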